1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_SPACES_H_
6 #define V8_SPACES_H_
7 
8 #include "src/allocation.h"
9 #include "src/base/atomicops.h"
10 #include "src/hashmap.h"
11 #include "src/list.h"
12 #include "src/log.h"
13 #include "src/platform/mutex.h"
14 #include "src/utils.h"
15 
16 namespace v8 {
17 namespace internal {
18 
19 class Isolate;
20 
21 // -----------------------------------------------------------------------------
22 // Heap structures:
23 //
24 // A JS heap consists of a young generation, an old generation, and a large
25 // object space. The young generation is divided into two semispaces. A
26 // scavenger implements Cheney's copying algorithm. The old generation is
27 // separated into a map space and an old object space. The map space contains
28 // all (and only) map objects, the rest of old objects go into the old space.
29 // The old generation is collected by a mark-sweep-compact collector.
30 //
31 // The semispaces of the young generation are contiguous.  The old and map
32 // spaces consist of a list of pages. A page has a page header and an object
33 // area.
34 //
35 // There is a separate large object space for objects larger than
36 // Page::kMaxHeapObjectSize, so that they do not have to move during
37 // collection. The large object space is paged. Pages in large object space
38 // may be larger than the page size.
39 //
40 // A store-buffer based write barrier is used to keep track of intergenerational
41 // references.  See store-buffer.h.
42 //
43 // During scavenges and mark-sweep collections we sometimes (after a store
44 // buffer overflow) iterate intergenerational pointers without decoding heap
45 // object maps, so if the page belongs to old pointer space or large object
46 // space it is essential to guarantee that the page does not contain any
47 // garbage pointers to new space: every pointer aligned word which satisfies
48 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
49 // new space. Thus objects in old pointer and large object spaces should have a
50 // special layout (e.g. no bare integer fields). This requirement does not
51 // apply to map space which is iterated in a special fashion. However we still
52 // require pointer fields of dead maps to be cleaned.
53 //
54 // To enable lazy cleaning of old space pages we can mark chunks of the page
55 // as being garbage.  Garbage sections are marked with a special map.  These
56 // sections are skipped when scanning the page, even if we are otherwise
57 // scanning without regard for object boundaries.  Garbage sections are chained
58 // together to form a free list after a GC.  Garbage sections created outside
59 // of GCs by object truncation etc. may not be in the free list chain.  Very
60 // small free spaces are ignored; they need only be cleaned of bogus pointers
61 // into new space.
62 //
63 // Each page may have up to one special garbage section.  The start of this
64 // section is denoted by the top field in the space.  The end of the section
65 // is denoted by the limit field in the space.  This special garbage section
66 // is not marked with a free space map in the data.  The point of this section
67 // is to enable linear allocation without having to constantly update the byte
68 // array every time the top field is updated and a new object is created.  The
69 // special garbage section is not in the chain of garbage sections.
70 //
71 // Since the top and limit fields are in the space, not the page, only one page
72 // has a special garbage section, and if the top and limit are equal then there
73 // is no special garbage section.
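// Illustrative sketch, not part of this header: one way a page scan can
// honour the "special garbage section" described above.  top and limit are
// the owning space's allocation pointers; this hypothetical helper simply
// jumps the scan pointer over the unallocated [top, limit) range, since that
// region carries no free-space map.
inline Address SkipSpecialGarbageSection(Address scan, Address top,
                                         Address limit) {
  return (scan == top) ? limit : scan;
}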
74 
75 // Some assertion macros used in the debugging mode.
76 
77 #define ASSERT_PAGE_ALIGNED(address)                                           \
78   ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
79 
80 #define ASSERT_OBJECT_ALIGNED(address)                                         \
81   ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
82 
83 #define ASSERT_OBJECT_SIZE(size)                                               \
84   ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
85 
86 #define ASSERT_PAGE_OFFSET(offset)                                             \
87   ASSERT((Page::kObjectStartOffset <= offset)                                  \
88       && (offset <= Page::kPageSize))
89 
90 #define ASSERT_MAP_PAGE_INDEX(index)                                           \
91   ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
92 
93 
94 class PagedSpace;
95 class MemoryAllocator;
96 class AllocationInfo;
97 class Space;
98 class FreeList;
99 class MemoryChunk;
100 
101 class MarkBit {
102  public:
103   typedef uint32_t CellType;
104 
105   inline MarkBit(CellType* cell, CellType mask, bool data_only)
106       : cell_(cell), mask_(mask), data_only_(data_only) { }
107 
108   inline CellType* cell() { return cell_; }
109   inline CellType mask() { return mask_; }
110 
111 #ifdef DEBUG
112   bool operator==(const MarkBit& other) {
113     return cell_ == other.cell_ && mask_ == other.mask_;
114   }
115 #endif
116 
117   inline void Set() { *cell_ |= mask_; }
118   inline bool Get() { return (*cell_ & mask_) != 0; }
119   inline void Clear() { *cell_ &= ~mask_; }
120 
121   inline bool data_only() { return data_only_; }
122 
123   inline MarkBit Next() {
124     CellType new_mask = mask_ << 1;
125     if (new_mask == 0) {
126       return MarkBit(cell_ + 1, 1, data_only_);
127     } else {
128       return MarkBit(cell_, new_mask, data_only_);
129     }
130   }
131 
132  private:
133   CellType* cell_;
134   CellType mask_;
135   // This boolean indicates that the object is in a data-only space with no
136   // pointers.  This enables some optimizations when marking.
137   // It is expected that this field is inlined and turned into control flow
138   // at the place where the MarkBit object is created.
139   bool data_only_;
140 };
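// Illustrative sketch, not part of this header: setting a run of consecutive
// mark bits with MarkBit::Next().  The caller is assumed to already hold a
// valid MarkBit for the first bit of the run.
inline void SetConsecutiveMarkBits(MarkBit first, int count) {
  MarkBit current = first;
  for (int i = 0; i < count; i++) {
    current.Set();             // OR the mask into the current cell
    current = current.Next();  // advance, spilling into the next cell when
                               // the shifted mask overflows to zero
  }
}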
141 
142 
143 // Bitmap is a sequence of cells each containing fixed number of bits.
144 class Bitmap {
145  public:
146   static const uint32_t kBitsPerCell = 32;
147   static const uint32_t kBitsPerCellLog2 = 5;
148   static const uint32_t kBitIndexMask = kBitsPerCell - 1;
149   static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
150   static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
151 
152   static const size_t kLength =
153     (1 << kPageSizeBits) >> (kPointerSizeLog2);
154 
155   static const size_t kSize =
156     (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
157 
158 
159   static int CellsForLength(int length) {
160     return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
161   }
162 
163   int CellsCount() {
164     return CellsForLength(kLength);
165   }
166 
167   static int SizeFor(int cells_count) {
168     return sizeof(MarkBit::CellType) * cells_count;
169   }
170 
171   INLINE(static uint32_t IndexToCell(uint32_t index)) {
172     return index >> kBitsPerCellLog2;
173   }
174 
175   INLINE(static uint32_t CellToIndex(uint32_t index)) {
176     return index << kBitsPerCellLog2;
177   }
178 
179   INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
180     return (index + kBitIndexMask) & ~kBitIndexMask;
181   }
182 
183   INLINE(MarkBit::CellType* cells()) {
184     return reinterpret_cast<MarkBit::CellType*>(this);
185   }
186 
187   INLINE(Address address()) {
188     return reinterpret_cast<Address>(this);
189   }
190 
191   INLINE(static Bitmap* FromAddress(Address addr)) {
192     return reinterpret_cast<Bitmap*>(addr);
193   }
194 
195   inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
196     MarkBit::CellType mask = 1 << (index & kBitIndexMask);
197     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
198     return MarkBit(cell, mask, data_only);
199   }
200 
201   static inline void Clear(MemoryChunk* chunk);
202 
203   static void PrintWord(uint32_t word, uint32_t himask = 0) {
204     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
205       if ((mask & himask) != 0) PrintF("[");
206       PrintF((mask & word) ? "1" : "0");
207       if ((mask & himask) != 0) PrintF("]");
208     }
209   }
210 
211   class CellPrinter {
212    public:
213     CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
214 
215     void Print(uint32_t pos, uint32_t cell) {
216       if (cell == seq_type) {
217         seq_length++;
218         return;
219       }
220 
221       Flush();
222 
223       if (IsSeq(cell)) {
224         seq_start = pos;
225         seq_length = 0;
226         seq_type = cell;
227         return;
228       }
229 
230       PrintF("%d: ", pos);
231       PrintWord(cell);
232       PrintF("\n");
233     }
234 
235     void Flush() {
236       if (seq_length > 0) {
237         PrintF("%d: %dx%d\n",
238                seq_start,
239                seq_type == 0 ? 0 : 1,
240                seq_length * kBitsPerCell);
241         seq_length = 0;
242       }
243     }
244 
245     static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
246 
247    private:
248     uint32_t seq_start;
249     uint32_t seq_type;
250     uint32_t seq_length;
251   };
252 
253   void Print() {
254     CellPrinter printer;
255     for (int i = 0; i < CellsCount(); i++) {
256       printer.Print(i, cells()[i]);
257     }
258     printer.Flush();
259     PrintF("\n");
260   }
261 
262   bool IsClean() {
263     for (int i = 0; i < CellsCount(); i++) {
264       if (cells()[i] != 0) {
265         return false;
266       }
267     }
268     return true;
269   }
270 };
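// Illustrative sketch, not part of this header: how a byte offset inside a
// chunk maps onto the bitmap.  There is one mark bit per pointer-sized word,
// so the bit index is the byte offset divided by the word size, mirroring
// MemoryChunk::AddressToMarkbitIndex() below.
inline MarkBit MarkBitForByteOffset(Bitmap* bitmap, uint32_t byte_offset) {
  uint32_t index = byte_offset >> kPointerSizeLog2;  // word index in the chunk
  return bitmap->MarkBitFromIndex(index);
}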
271 
272 
273 class SkipList;
274 class SlotsBuffer;
275 
276 // MemoryChunk represents a memory region owned by a specific space.
277 // It is divided into the header and the body. Chunk start is always
278 // 1MB aligned. Start of the body is aligned so it can accommodate
279 // any heap object.
280 class MemoryChunk {
281  public:
282   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
283   static MemoryChunk* FromAddress(Address a) {
284     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
285   }
286 
287   // Only works for addresses in pointer spaces, not data or code spaces.
288   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
289 
290   Address address() { return reinterpret_cast<Address>(this); }
291 
292   bool is_valid() { return address() != NULL; }
293 
294   MemoryChunk* next_chunk() const {
295     return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
296   }
297 
298   MemoryChunk* prev_chunk() const {
299     return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
300   }
301 
302   void set_next_chunk(MemoryChunk* next) {
303     base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
304   }
305 
306   void set_prev_chunk(MemoryChunk* prev) {
307     base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
308   }
309 
310   Space* owner() const {
311     if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
312         kFailureTag) {
313       return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
314                                       kFailureTag);
315     } else {
316       return NULL;
317     }
318   }
319 
320   void set_owner(Space* space) {
321     ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
322     owner_ = reinterpret_cast<Address>(space) + kFailureTag;
323     ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
324            kFailureTag);
325   }
326 
327   VirtualMemory* reserved_memory() {
328     return &reservation_;
329   }
330 
331   void InitializeReservedMemory() {
332     reservation_.Reset();
333   }
334 
335   void set_reserved_memory(VirtualMemory* reservation) {
336     ASSERT_NOT_NULL(reservation);
337     reservation_.TakeControl(reservation);
338   }
339 
340   bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
341   void initialize_scan_on_scavenge(bool scan) {
342     if (scan) {
343       SetFlag(SCAN_ON_SCAVENGE);
344     } else {
345       ClearFlag(SCAN_ON_SCAVENGE);
346     }
347   }
348   inline void set_scan_on_scavenge(bool scan);
349 
350   int store_buffer_counter() { return store_buffer_counter_; }
351   void set_store_buffer_counter(int counter) {
352     store_buffer_counter_ = counter;
353   }
354 
355   bool Contains(Address addr) {
356     return addr >= area_start() && addr < area_end();
357   }
358 
359   // Checks whether addr can be a limit of addresses in this page.
360   // It's a limit if it's in the page, or if it's just after the
361   // last byte of the page.
362   bool ContainsLimit(Address addr) {
363     return addr >= area_start() && addr <= area_end();
364   }
365 
366   // Every n write barrier invocations we go to runtime even though
367   // we could have handled it in generated code.  This lets us check
368   // whether we have hit the limit and should do some more marking.
369   static const int kWriteBarrierCounterGranularity = 500;
370 
371   enum MemoryChunkFlags {
372     IS_EXECUTABLE,
373     ABOUT_TO_BE_FREED,
374     POINTERS_TO_HERE_ARE_INTERESTING,
375     POINTERS_FROM_HERE_ARE_INTERESTING,
376     SCAN_ON_SCAVENGE,
377     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
378     IN_TO_SPACE,    // All pages in new space have one of these two set.
379     NEW_SPACE_BELOW_AGE_MARK,
380     CONTAINS_ONLY_DATA,
381     EVACUATION_CANDIDATE,
382     RESCAN_ON_EVACUATION,
383 
384     // Pages swept precisely can be iterated, hitting only the live objects,
385     // whereas pages swept conservatively cannot be iterated over. Both flags
386     // indicate that the marking bits have been cleared by the sweeper;
387     // otherwise the marking bits are still intact.
388     WAS_SWEPT_PRECISELY,
389     WAS_SWEPT_CONSERVATIVELY,
390 
391     // Large objects can have a progress bar in their page header. These objects
392     // are scanned in increments and will be kept black while being scanned.
393     // Even if the mutator writes to them they will be kept black and a white
394     // to grey transition is performed in the value.
395     HAS_PROGRESS_BAR,
396 
397     // Last flag, keep at bottom.
398     NUM_MEMORY_CHUNK_FLAGS
399   };
400 
401 
402   static const int kPointersToHereAreInterestingMask =
403       1 << POINTERS_TO_HERE_ARE_INTERESTING;
404 
405   static const int kPointersFromHereAreInterestingMask =
406       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
407 
408   static const int kEvacuationCandidateMask =
409       1 << EVACUATION_CANDIDATE;
410 
411   static const int kSkipEvacuationSlotsRecordingMask =
412       (1 << EVACUATION_CANDIDATE) |
413       (1 << RESCAN_ON_EVACUATION) |
414       (1 << IN_FROM_SPACE) |
415       (1 << IN_TO_SPACE);
416 
417 
418   void SetFlag(int flag) {
419     flags_ |= static_cast<uintptr_t>(1) << flag;
420   }
421 
422   void ClearFlag(int flag) {
423     flags_ &= ~(static_cast<uintptr_t>(1) << flag);
424   }
425 
426   void SetFlagTo(int flag, bool value) {
427     if (value) {
428       SetFlag(flag);
429     } else {
430       ClearFlag(flag);
431     }
432   }
433 
434   bool IsFlagSet(int flag) {
435     return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
436   }
437 
438   // Set or clear multiple flags at a time. The flags in the mask
439   // are set to the value in "flags", the rest retain the current value
440   // in flags_.
441   void SetFlags(intptr_t flags, intptr_t mask) {
442     flags_ = (flags_ & ~mask) | (flags & mask);
443   }
444 
445   // Return all current flags.
446   intptr_t GetFlags() { return flags_; }
447 
448 
449   // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
450   // sweeping must not be performed on that page.
451   // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
452   // page and will not touch the page memory anymore.
453   // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
454   // sweeper thread.
455   // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
456   enum ParallelSweepingState {
457     PARALLEL_SWEEPING_DONE,
458     PARALLEL_SWEEPING_FINALIZE,
459     PARALLEL_SWEEPING_IN_PROGRESS,
460     PARALLEL_SWEEPING_PENDING
461   };
462 
463   ParallelSweepingState parallel_sweeping() {
464     return static_cast<ParallelSweepingState>(
465         base::Acquire_Load(&parallel_sweeping_));
466   }
467 
468   void set_parallel_sweeping(ParallelSweepingState state) {
469     base::Release_Store(&parallel_sweeping_, state);
470   }
471 
472   bool TryParallelSweeping() {
473     return base::Acquire_CompareAndSwap(
474                &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
475                PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
476   }
477 
478   // Manage live byte count (count of bytes known to be live,
479   // because they are marked black).
480   void ResetLiveBytes() {
481     if (FLAG_gc_verbose) {
482       PrintF("ResetLiveBytes:%p:%x->0\n",
483              static_cast<void*>(this), live_byte_count_);
484     }
485     live_byte_count_ = 0;
486   }
487   void IncrementLiveBytes(int by) {
488     if (FLAG_gc_verbose) {
489       printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
490              static_cast<void*>(this), live_byte_count_,
491              ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
492              live_byte_count_ + by);
493     }
494     live_byte_count_ += by;
495     ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
496   }
497   int LiveBytes() {
498     ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
499     return live_byte_count_;
500   }
501 
502   int write_barrier_counter() {
503     return static_cast<int>(write_barrier_counter_);
504   }
505 
506   void set_write_barrier_counter(int counter) {
507     write_barrier_counter_ = counter;
508   }
509 
510   int progress_bar() {
511     ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
512     return progress_bar_;
513   }
514 
515   void set_progress_bar(int progress_bar) {
516     ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
517     progress_bar_ = progress_bar;
518   }
519 
520   void ResetProgressBar() {
521     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
522       set_progress_bar(0);
523       ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
524     }
525   }
526 
527   bool IsLeftOfProgressBar(Object** slot) {
528     Address slot_address = reinterpret_cast<Address>(slot);
529     ASSERT(slot_address > this->address());
530     return (slot_address - (this->address() + kObjectStartOffset)) <
531            progress_bar();
532   }
533 
534   static void IncrementLiveBytesFromGC(Address address, int by) {
535     MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
536   }
537 
538   static void IncrementLiveBytesFromMutator(Address address, int by);
539 
540   static const intptr_t kAlignment =
541       (static_cast<uintptr_t>(1) << kPageSizeBits);
542 
543   static const intptr_t kAlignmentMask = kAlignment - 1;
544 
545   static const intptr_t kSizeOffset = 0;
546 
547   static const intptr_t kLiveBytesOffset =
548      kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
549      kPointerSize + kPointerSize +
550      kPointerSize + kPointerSize + kPointerSize + kIntSize;
551 
552   static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
553 
554   static const size_t kWriteBarrierCounterOffset =
555       kSlotsBufferOffset + kPointerSize + kPointerSize;
556 
557   static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
558                                     kIntSize + kIntSize + kPointerSize +
559                                     5 * kPointerSize +
560                                     kPointerSize + kPointerSize;
561 
562   static const int kBodyOffset =
563       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
564 
565   // The start offset of the object area in a page. Aligned to both maps and
566   // code alignment to be suitable for both.  Also aligned to 32 words because
567   // the marking bitmap is arranged in 32 bit chunks.
568   static const int kObjectStartAlignment = 32 * kPointerSize;
569   static const int kObjectStartOffset = kBodyOffset - 1 +
570       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
571 
572   size_t size() const { return size_; }
573 
574   void set_size(size_t size) {
575     size_ = size;
576   }
577 
578   void SetArea(Address area_start, Address area_end) {
579     area_start_ = area_start;
580     area_end_ = area_end;
581   }
582 
583   Executability executable() {
584     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
585   }
586 
587   bool ContainsOnlyData() {
588     return IsFlagSet(CONTAINS_ONLY_DATA);
589   }
590 
591   bool InNewSpace() {
592     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
593   }
594 
595   bool InToSpace() {
596     return IsFlagSet(IN_TO_SPACE);
597   }
598 
599   bool InFromSpace() {
600     return IsFlagSet(IN_FROM_SPACE);
601   }
602 
603   // ---------------------------------------------------------------------
604   // Markbits support
605 
606   inline Bitmap* markbits() {
607     return Bitmap::FromAddress(address() + kHeaderSize);
608   }
609 
610   void PrintMarkbits() { markbits()->Print(); }
611 
612   inline uint32_t AddressToMarkbitIndex(Address addr) {
613     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
614   }
615 
616   inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
617     const intptr_t offset =
618         reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
619 
620     return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
621   }
622 
623   inline Address MarkbitIndexToAddress(uint32_t index) {
624     return this->address() + (index << kPointerSizeLog2);
625   }
626 
627   void InsertAfter(MemoryChunk* other);
628   void Unlink();
629 
630   inline Heap* heap() { return heap_; }
631 
632   static const int kFlagsOffset = kPointerSize;
633 
634   bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
635 
636   bool ShouldSkipEvacuationSlotRecording() {
637     return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
638   }
639 
640   inline SkipList* skip_list() {
641     return skip_list_;
642   }
643 
644   inline void set_skip_list(SkipList* skip_list) {
645     skip_list_ = skip_list;
646   }
647 
648   inline SlotsBuffer* slots_buffer() {
649     return slots_buffer_;
650   }
651 
652   inline SlotsBuffer** slots_buffer_address() {
653     return &slots_buffer_;
654   }
655 
656   void MarkEvacuationCandidate() {
657     ASSERT(slots_buffer_ == NULL);
658     SetFlag(EVACUATION_CANDIDATE);
659   }
660 
661   void ClearEvacuationCandidate() {
662     ASSERT(slots_buffer_ == NULL);
663     ClearFlag(EVACUATION_CANDIDATE);
664   }
665 
666   Address area_start() { return area_start_; }
667   Address area_end() { return area_end_; }
668   int area_size() {
669     return static_cast<int>(area_end() - area_start());
670   }
671   bool CommitArea(size_t requested);
672 
673   // Approximate amount of physical memory committed for this chunk.
674   size_t CommittedPhysicalMemory() {
675     return high_water_mark_;
676   }
677 
678   static inline void UpdateHighWaterMark(Address mark);
679 
680  protected:
681   size_t size_;
682   intptr_t flags_;
683 
684   // Start and end of allocatable memory on this chunk.
685   Address area_start_;
686   Address area_end_;
687 
688   // If the chunk needs to remember its memory reservation, it is stored here.
689   VirtualMemory reservation_;
690   // The identity of the owning space.  This is tagged as a failure pointer, but
691   // no failure can be in an object, so this can be distinguished from any entry
692   // in a fixed array.
693   Address owner_;
694   Heap* heap_;
695   // Used by the store buffer to keep track of which pages to mark scan-on-
696   // scavenge.
697   int store_buffer_counter_;
698   // Count of bytes marked black on page.
699   int live_byte_count_;
700   SlotsBuffer* slots_buffer_;
701   SkipList* skip_list_;
702   intptr_t write_barrier_counter_;
703   // Used by the incremental marker to keep track of the scanning progress in
704   // large objects that have a progress bar and are scanned in increments.
705   int progress_bar_;
706   // Assuming the initial allocation on a page is sequential,
707   // count highest number of bytes ever allocated on the page.
708   int high_water_mark_;
709 
710   base::AtomicWord parallel_sweeping_;
711 
712   // PagedSpace free-list statistics.
713   intptr_t available_in_small_free_list_;
714   intptr_t available_in_medium_free_list_;
715   intptr_t available_in_large_free_list_;
716   intptr_t available_in_huge_free_list_;
717   intptr_t non_available_small_blocks_;
718 
719   static MemoryChunk* Initialize(Heap* heap,
720                                  Address base,
721                                  size_t size,
722                                  Address area_start,
723                                  Address area_end,
724                                  Executability executable,
725                                  Space* owner);
726 
727  private:
728   // next_chunk_ holds a pointer of type MemoryChunk
729   base::AtomicWord next_chunk_;
730   // prev_chunk_ holds a pointer of type MemoryChunk
731   base::AtomicWord prev_chunk_;
732 
733   friend class MemoryAllocator;
734 };
735 
736 
737 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
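// Illustrative sketch, not part of this header: MemoryChunk::FromAddress()
// works because every chunk starts on a kAlignment (1 MB) boundary, so
// masking away the low bits of any interior address yields the chunk header.
// Two addresses therefore share a chunk exactly when the masked values match.
inline bool InSameChunk(Address a, Address b) {
  return (OffsetFrom(a) & ~MemoryChunk::kAlignmentMask) ==
         (OffsetFrom(b) & ~MemoryChunk::kAlignmentMask);
}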
738 
739 
740 // -----------------------------------------------------------------------------
741 // A page is a memory chunk of size 1MB. Large object pages may be larger.
742 //
743 // The only way to get a page pointer is by calling factory methods:
744 //   Page* p = Page::FromAddress(addr); or
745 //   Page* p = Page::FromAllocationTop(top);
746 class Page : public MemoryChunk {
747  public:
748   // Returns the page containing a given address. The address ranges
749   // from [page_addr .. page_addr + kPageSize[
750   // This only works if the object is in fact in a page.  See also MemoryChunk::
751   // FromAddress() and FromAnyPointerAddress().
752   INLINE(static Page* FromAddress(Address a)) {
753     return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
754   }
755 
756   // Returns the page containing an allocation top. Because an allocation
757   // top address can be the upper bound of the page, we need to subtract
758   // kPointerSize from it first. The address ranges from
759   // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
760   INLINE(static Page* FromAllocationTop(Address top)) {
761     Page* p = FromAddress(top - kPointerSize);
762     return p;
763   }
764 
765   // Returns the next page in the chain of pages owned by a space.
766   inline Page* next_page();
767   inline Page* prev_page();
768   inline void set_next_page(Page* page);
769   inline void set_prev_page(Page* page);
770 
771   // Checks whether an address is page aligned.
772   static bool IsAlignedToPageSize(Address a) {
773     return 0 == (OffsetFrom(a) & kPageAlignmentMask);
774   }
775 
776   // Returns the offset of a given address to this page.
777   INLINE(int Offset(Address a)) {
778     int offset = static_cast<int>(a - address());
779     return offset;
780   }
781 
782   // Returns the address for a given offset into this page.
783   Address OffsetToAddress(int offset) {
784     ASSERT_PAGE_OFFSET(offset);
785     return address() + offset;
786   }
787 
788   // ---------------------------------------------------------------------
789 
790   // Page size in bytes.  This must be a multiple of the OS page size.
791   static const int kPageSize = 1 << kPageSizeBits;
792 
793   // Maximum object size that fits in a page. Objects larger than that size
794   // are allocated in large object space and are never moved in memory. This
795   // also applies to new space allocation, since objects are never migrated
796   // from new space to large object space.  Takes double alignment into account.
797   static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
798 
799   // Page size mask.
800   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
801 
802   inline void ClearGCFields();
803 
804   static inline Page* Initialize(Heap* heap,
805                                  MemoryChunk* chunk,
806                                  Executability executable,
807                                  PagedSpace* owner);
808 
809   void InitializeAsAnchor(PagedSpace* owner);
810 
811   bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
812   bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
813   bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
814 
815   void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
816   void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
817 
818   void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
819   void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
820 
821   void ResetFreeListStatistics();
822 
823 #define FRAGMENTATION_STATS_ACCESSORS(type, name) \
824   type name() { return name##_; }                 \
825   void set_##name(type name) { name##_ = name; }  \
826   void add_##name(type name) { name##_ += name; }
827 
828   FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
829   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
830   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
831   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
832   FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
833 
834 #undef FRAGMENTATION_STATS_ACCESSORS
835 
836 #ifdef DEBUG
837   void Print();
838 #endif  // DEBUG
839 
840   friend class MemoryAllocator;
841 };
842 
843 
844 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
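// Illustrative sketch, not part of this header: round-tripping an address
// through Page::FromAddress(), Offset() and OffsetToAddress().  Valid for
// addresses inside the object area of a regular page, i.e. offsets of at
// least kObjectStartOffset, which is what ASSERT_PAGE_OFFSET checks.
inline Address RoundTripThroughPage(Address addr) {
  Page* page = Page::FromAddress(addr);  // mask off the low kPageSizeBits
  int offset = page->Offset(addr);       // byte offset within the page
  return page->OffsetToAddress(offset);  // recovers the original address
}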
845 
846 
847 class LargePage : public MemoryChunk {
848  public:
849   HeapObject* GetObject() {
850     return HeapObject::FromAddress(area_start());
851   }
852 
853   inline LargePage* next_page() const {
854     return static_cast<LargePage*>(next_chunk());
855   }
856 
857   inline void set_next_page(LargePage* page) {
858     set_next_chunk(page);
859   }
860  private:
861   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
862 
863   friend class MemoryAllocator;
864 };
865 
866 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
867 
868 // ----------------------------------------------------------------------------
869 // Space is the abstract superclass for all allocation spaces.
870 class Space : public Malloced {
871  public:
872   Space(Heap* heap, AllocationSpace id, Executability executable)
873       : heap_(heap), id_(id), executable_(executable) {}
874 
875   virtual ~Space() {}
876 
877   Heap* heap() const { return heap_; }
878 
879   // Does the space need executable memory?
880   Executability executable() { return executable_; }
881 
882   // Identity used in error reporting.
883   AllocationSpace identity() { return id_; }
884 
885   // Returns allocated size.
886   virtual intptr_t Size() = 0;
887 
888   // Returns size of objects. Can differ from the allocated size
889   // (e.g. see LargeObjectSpace).
890   virtual intptr_t SizeOfObjects() { return Size(); }
891 
892   virtual int RoundSizeDownToObjectAlignment(int size) {
893     if (id_ == CODE_SPACE) {
894       return RoundDown(size, kCodeAlignment);
895     } else {
896       return RoundDown(size, kPointerSize);
897     }
898   }
899 
900 #ifdef DEBUG
901   virtual void Print() = 0;
902 #endif
903 
904  private:
905   Heap* heap_;
906   AllocationSpace id_;
907   Executability executable_;
908 };
909 
910 
911 // ----------------------------------------------------------------------------
912 // All heap objects containing executable code (code objects) must be allocated
913 // from a 2 GB range of memory, so that they can call each other using 32-bit
914 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
915 // displacements cover the entire 4GB virtual address space.  On 64-bit
916 // platforms, we support this using the CodeRange object, which reserves and
917 // manages a range of virtual memory.
918 class CodeRange {
919  public:
920   explicit CodeRange(Isolate* isolate);
921   ~CodeRange() { TearDown(); }
922 
923   // Reserves a range of virtual memory, but does not commit any of it.
924   // Can only be called once, at heap initialization time.
925   // Returns false on failure.
926   bool SetUp(size_t requested_size);
927 
928   // Frees the range of virtual memory, and frees the data structures used to
929   // manage it.
930   void TearDown();
931 
932   bool valid() { return code_range_ != NULL; }
933   Address start() {
934     ASSERT(valid());
935     return static_cast<Address>(code_range_->address());
936   }
937   bool contains(Address address) {
938     if (!valid()) return false;
939     Address start = static_cast<Address>(code_range_->address());
940     return start <= address && address < start + code_range_->size();
941   }
942 
943   // Allocates a chunk of memory from the large-object portion of
944   // the code range.  On platforms with no separate code range, should
945   // not be called.
946   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
947                                             const size_t commit_size,
948                                             size_t* allocated);
949   bool CommitRawMemory(Address start, size_t length);
950   bool UncommitRawMemory(Address start, size_t length);
951   void FreeRawMemory(Address buf, size_t length);
952 
953  private:
954   Isolate* isolate_;
955 
956   // The reserved range of virtual memory that all code objects are put in.
957   VirtualMemory* code_range_;
958   // Plain old data class, just a struct plus a constructor.
959   class FreeBlock {
960    public:
961     FreeBlock(Address start_arg, size_t size_arg)
962         : start(start_arg), size(size_arg) {
963       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
964       ASSERT(size >= static_cast<size_t>(Page::kPageSize));
965     }
966     FreeBlock(void* start_arg, size_t size_arg)
967         : start(static_cast<Address>(start_arg)), size(size_arg) {
968       ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
969       ASSERT(size >= static_cast<size_t>(Page::kPageSize));
970     }
971 
972     Address start;
973     size_t size;
974   };
975 
976   // Freed blocks of memory are added to the free list.  When the allocation
977   // list is exhausted, the free list is sorted and merged to make the new
978   // allocation list.
979   List<FreeBlock> free_list_;
980   // Memory is allocated from the free blocks on the allocation list.
981   // The block at current_allocation_block_index_ is the current block.
982   List<FreeBlock> allocation_list_;
983   int current_allocation_block_index_;
984 
985   // Finds a block on the allocation list that contains at least the
986   // requested amount of memory.  If none is found, sorts and merges
987   // the existing free memory blocks, and searches again.
988   // If none can be found, returns false.
989   bool GetNextAllocationBlock(size_t requested);
990   // Compares the start addresses of two free blocks.
991   static int CompareFreeBlockAddress(const FreeBlock* left,
992                                      const FreeBlock* right);
993 
994   DISALLOW_COPY_AND_ASSIGN(CodeRange);
995 };
996 
997 
998 class SkipList {
999  public:
1000   SkipList() {
1001     Clear();
1002   }
1003 
1004   void Clear() {
1005     for (int idx = 0; idx < kSize; idx++) {
1006       starts_[idx] = reinterpret_cast<Address>(-1);
1007     }
1008   }
1009 
1010   Address StartFor(Address addr) {
1011     return starts_[RegionNumber(addr)];
1012   }
1013 
1014   void AddObject(Address addr, int size) {
1015     int start_region = RegionNumber(addr);
1016     int end_region = RegionNumber(addr + size - kPointerSize);
1017     for (int idx = start_region; idx <= end_region; idx++) {
1018       if (starts_[idx] > addr) starts_[idx] = addr;
1019     }
1020   }
1021 
1022   static inline int RegionNumber(Address addr) {
1023     return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1024   }
1025 
1026   static void Update(Address addr, int size) {
1027     Page* page = Page::FromAddress(addr);
1028     SkipList* list = page->skip_list();
1029     if (list == NULL) {
1030       list = new SkipList();
1031       page->set_skip_list(list);
1032     }
1033 
1034     list->AddObject(addr, size);
1035   }
1036 
1037  private:
1038   static const int kRegionSizeLog2 = 13;
1039   static const int kRegionSize = 1 << kRegionSizeLog2;
1040   static const int kSize = Page::kPageSize / kRegionSize;
1041 
1042   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1043 
1044   Address starts_[kSize];
1045 };
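// Illustrative sketch, not part of this header: the skip list divides each
// page into 8KB (1 << kRegionSizeLog2) regions and remembers, per region,
// the lowest object start touching it.  Two addresses hit the same entry
// when they fall in the same region of the same page.
inline bool InSameSkipListRegion(Address a, Address b) {
  return Page::FromAddress(a) == Page::FromAddress(b) &&
         SkipList::RegionNumber(a) == SkipList::RegionNumber(b);
}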
1046 
1047 
1048 // ----------------------------------------------------------------------------
1049 // A space acquires chunks of memory from the operating system. The memory
1050 // allocator allocates and deallocates pages for the paged heap spaces and large
1051 // pages for large object space.
1052 //
1053 // Each space has to manage its own pages.
1054 //
1055 class MemoryAllocator {
1056  public:
1057   explicit MemoryAllocator(Isolate* isolate);
1058 
1059   // Initializes its internal bookkeeping structures.
1060   // Max capacity of the total space and executable memory limit.
1061   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
1062 
1063   void TearDown();
1064 
1065   Page* AllocatePage(
1066       intptr_t size, PagedSpace* owner, Executability executable);
1067 
1068   LargePage* AllocateLargePage(
1069       intptr_t object_size, Space* owner, Executability executable);
1070 
1071   void Free(MemoryChunk* chunk);
1072 
1073   // Returns the maximum available bytes of heaps.
1074   intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
1075 
1076   // Returns allocated spaces in bytes.
1077   intptr_t Size() { return size_; }
1078 
1079   // Returns the maximum available executable bytes of heaps.
1080   intptr_t AvailableExecutable() {
1081     if (capacity_executable_ < size_executable_) return 0;
1082     return capacity_executable_ - size_executable_;
1083   }
1084 
1085   // Returns allocated executable spaces in bytes.
1086   intptr_t SizeExecutable() { return size_executable_; }
1087 
1088   // Returns maximum available bytes that the old space can have.
1089   intptr_t MaxAvailable() {
1090     return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
1091   }
1092 
1093   // Returns an indication of whether a pointer is in a space that has
1094   // been allocated by this MemoryAllocator.
1095   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
1096     return address < lowest_ever_allocated_ ||
1097         address >= highest_ever_allocated_;
1098   }
1099 
1100 #ifdef DEBUG
1101   // Reports statistic info of the space.
1102   void ReportStatistics();
1103 #endif
1104 
1105   // Returns a MemoryChunk in which the memory region from commit_area_size to
1106   // reserve_area_size of the chunk area is reserved but not committed; it
1107   // can be committed later by calling MemoryChunk::CommitArea.
1108   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1109                              intptr_t commit_area_size,
1110                              Executability executable,
1111                              Space* space);
1112 
1113   Address ReserveAlignedMemory(size_t requested,
1114                                size_t alignment,
1115                                VirtualMemory* controller);
1116   Address AllocateAlignedMemory(size_t reserve_size,
1117                                 size_t commit_size,
1118                                 size_t alignment,
1119                                 Executability executable,
1120                                 VirtualMemory* controller);
1121 
1122   bool CommitMemory(Address addr, size_t size, Executability executable);
1123 
1124   void FreeMemory(VirtualMemory* reservation, Executability executable);
1125   void FreeMemory(Address addr, size_t size, Executability executable);
1126 
1127   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1128   // the address is not NULL, the size is greater than zero, and that the
1129   // block is contained in the initial chunk.  Returns true if it succeeded
1130   // and false otherwise.
1131   bool CommitBlock(Address start, size_t size, Executability executable);
1132 
1133   // Uncommit a contiguous block of memory [start..(start+size)[.
1134   // start is not NULL, the size is greater than zero, and the
1135   // block is contained in the initial chunk.  Returns true if it succeeded
1136   // and false otherwise.
1137   bool UncommitBlock(Address start, size_t size);
1138 
1139   // Zaps a contiguous block of memory [start..(start+size)[ thus
1140   // filling it up with a recognizable non-NULL bit pattern.
1141   void ZapBlock(Address start, size_t size);
1142 
1143   void PerformAllocationCallback(ObjectSpace space,
1144                                  AllocationAction action,
1145                                  size_t size);
1146 
1147   void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
1148                                           ObjectSpace space,
1149                                           AllocationAction action);
1150 
1151   void RemoveMemoryAllocationCallback(
1152       MemoryAllocationCallback callback);
1153 
1154   bool MemoryAllocationCallbackRegistered(
1155       MemoryAllocationCallback callback);
1156 
1157   static int CodePageGuardStartOffset();
1158 
1159   static int CodePageGuardSize();
1160 
1161   static int CodePageAreaStartOffset();
1162 
1163   static int CodePageAreaEndOffset();
1164 
1165   static int CodePageAreaSize() {
1166     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1167   }
1168 
1169   MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1170                                               Address start,
1171                                               size_t commit_size,
1172                                               size_t reserved_size);
1173 
1174  private:
1175   Isolate* isolate_;
1176 
1177   // Maximum space size in bytes.
1178   size_t capacity_;
1179   // Maximum subset of capacity_ that can be executable
1180   size_t capacity_executable_;
1181 
1182   // Allocated space size in bytes.
1183   size_t size_;
1184   // Allocated executable space size in bytes.
1185   size_t size_executable_;
1186 
1187   // We keep the lowest and highest addresses allocated as a quick way
1188   // of determining that pointers are outside the heap. The estimate is
1189   // conservative, i.e. not all addresses in 'allocated' space are allocated
1190   // to our heap. The range is [lowest, highest[, inclusive on the low end
1191   // and exclusive on the high end.
1192   void* lowest_ever_allocated_;
1193   void* highest_ever_allocated_;
1194 
1195   struct MemoryAllocationCallbackRegistration {
1196     MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
1197                                          ObjectSpace space,
1198                                          AllocationAction action)
1199         : callback(callback), space(space), action(action) {
1200     }
1201     MemoryAllocationCallback callback;
1202     ObjectSpace space;
1203     AllocationAction action;
1204   };
1205 
1206   // A list of callbacks that are triggered when memory is allocated or freed.
1207   List<MemoryAllocationCallbackRegistration>
1208       memory_allocation_callbacks_;
1209 
1210   // Initializes pages in a chunk. Returns the first page address.
1211   // This function and GetChunkId() are provided for the mark-compact
1212   // collector to rebuild page headers in the from space, which is
1213   // used as a marking stack and its page headers are destroyed.
1214   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1215                                PagedSpace* owner);
1216 
1217   void UpdateAllocatedSpaceLimits(void* low, void* high) {
1218     lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
1219     highest_ever_allocated_ = Max(highest_ever_allocated_, high);
1220   }
1221 
1222   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1223 };
1224 
1225 
1226 // -----------------------------------------------------------------------------
1227 // Interface for heap object iterator to be implemented by all object space
1228 // object iterators.
1229 //
1230 // NOTE: The space specific object iterators also implement their own next()
1231 //       method, which is used to avoid using virtual functions when
1232 //       iterating a specific space.
1233 
1234 class ObjectIterator : public Malloced {
1235  public:
1236   virtual ~ObjectIterator() { }
1237 
1238   virtual HeapObject* next_object() = 0;
1239 };
1240 
1241 
1242 // -----------------------------------------------------------------------------
1243 // Heap object iterator in new/old/map spaces.
1244 //
1245 // A HeapObjectIterator iterates objects from the bottom of the given space
1246 // to its top or from the bottom of the given page to its top.
1247 //
1248 // If objects are allocated in the page during iteration the iterator may
1249 // or may not iterate over those objects.  The caller must create a new
1250 // iterator in order to be sure to visit these new objects.
1251 class HeapObjectIterator: public ObjectIterator {
1252  public:
1253   // Creates a new object iterator in a given space.
1254   // If the size function is not given, the iterator calls the default
1255   // Object::Size().
1256   explicit HeapObjectIterator(PagedSpace* space);
1257   HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
1258   HeapObjectIterator(Page* page, HeapObjectCallback size_func);
1259 
1260   // Advance to the next object, skipping free spaces and other fillers and
1261   // skipping the special garbage section of which there is one per space.
1262   // Returns NULL when the iteration has ended.
1263   inline HeapObject* Next() {
1264     do {
1265       HeapObject* next_obj = FromCurrentPage();
1266       if (next_obj != NULL) return next_obj;
1267     } while (AdvanceToNextPage());
1268     return NULL;
1269   }
1270 
1271   virtual HeapObject* next_object() {
1272     return Next();
1273   }
1274 
1275  private:
1276   enum PageMode { kOnePageOnly, kAllPagesInSpace };
1277 
1278   Address cur_addr_;  // Current iteration point.
1279   Address cur_end_;   // End iteration point.
1280   HeapObjectCallback size_func_;  // Size function or NULL.
1281   PagedSpace* space_;
1282   PageMode page_mode_;
1283 
1284   // Fast (inlined) path of next().
1285   inline HeapObject* FromCurrentPage();
1286 
1287   // Slow path of next(), goes into the next page.  Returns false if the
1288   // iteration has ended.
1289   bool AdvanceToNextPage();
1290 
1291   // Initializes fields.
1292   inline void Initialize(PagedSpace* owner,
1293                          Address start,
1294                          Address end,
1295                          PageMode mode,
1296                          HeapObjectCallback size_func);
1297 };
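// Illustrative sketch, not part of this header: the typical way to walk a
// paged space with HeapObjectIterator, relying on Next() returning NULL once
// every page has been exhausted.
inline int CountObjectsInSpace(PagedSpace* space) {
  HeapObjectIterator it(space);
  int count = 0;
  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
    count++;  // obj is a live object; fillers and the garbage section are skipped
  }
  return count;
}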
1298 
1299 
1300 // -----------------------------------------------------------------------------
1301 // A PageIterator iterates the pages in a paged space.
1302 
1303 class PageIterator BASE_EMBEDDED {
1304  public:
1305   explicit inline PageIterator(PagedSpace* space);
1306 
1307   inline bool has_next();
1308   inline Page* next();
1309 
1310  private:
1311   PagedSpace* space_;
1312   Page* prev_page_;  // Previous page returned.
1313   // Next page that will be returned.  Cached here so that we can use this
1314   // iterator for operations that deallocate pages.
1315   Page* next_page_;
1316 };
1317 
1318 
1319 // -----------------------------------------------------------------------------
1320 // A space has a circular list of pages. The next page can be accessed via
1321 // Page::next_page() call.
1322 
1323 // An abstraction of allocation and relocation pointers in a page-structured
1324 // space.
1325 class AllocationInfo {
1326  public:
1327   AllocationInfo() : top_(NULL), limit_(NULL) {
1328   }
1329 
1330   INLINE(void set_top(Address top)) {
1331     SLOW_ASSERT(top == NULL ||
1332         (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
1333     top_ = top;
1334   }
1335 
1336   INLINE(Address top()) const {
1337     SLOW_ASSERT(top_ == NULL ||
1338         (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
1339     return top_;
1340   }
1341 
1342   Address* top_address() {
1343     return &top_;
1344   }
1345 
1346   INLINE(void set_limit(Address limit)) {
1347     SLOW_ASSERT(limit == NULL ||
1348         (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
1349     limit_ = limit;
1350   }
1351 
1352   INLINE(Address limit()) const {
1353     SLOW_ASSERT(limit_ == NULL ||
1354         (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
1355     return limit_;
1356   }
1357 
1358   Address* limit_address() {
1359     return &limit_;
1360   }
1361 
1362 #ifdef DEBUG
1363   bool VerifyPagedAllocation() {
1364     return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
1365         && (top_ <= limit_);
1366   }
1367 #endif
1368 
1369  private:
1370   // Current allocation top.
1371   Address top_;
1372   // Current allocation limit.
1373   Address limit_;
1374 };
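// Illustrative sketch, not part of this header: the bump-pointer step that
// AllocationInfo's top/limit pair supports.  Real allocation goes through
// the spaces' allocation paths; this only shows the pointer arithmetic.
// size_in_bytes is assumed to be object-aligned.
inline Address TryBumpAllocate(AllocationInfo* info, int size_in_bytes) {
  Address top = info->top();
  if (info->limit() - top < size_in_bytes) return NULL;  // not enough room
  info->set_top(top + size_in_bytes);  // reserve [top, top + size_in_bytes)
  return top;                          // address of the new object
}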
1375 
1376 
1377 // An abstraction of the accounting statistics of a page-structured space.
1378 // The 'capacity' of a space is the number of object-area bytes (i.e., not
1379 // including page bookkeeping structures) currently in the space. The 'size'
1380 // of a space is the number of allocated bytes, the 'waste' in the space is
1381 // the number of bytes that are not allocated and not available to
1382 // allocation without reorganizing the space via a GC (e.g. small blocks due
1383 // to internal fragmentation, top of page areas in map space), and the bytes
1384 // 'available' is the number of unallocated bytes that are not waste.  The
1385 // capacity is the sum of size, waste, and available.
1386 //
1387 // The stats are only set by functions that ensure they stay balanced. These
1388 // functions increase or decrease one of the non-capacity stats in
1389 // conjunction with capacity, or else they always balance increases and
1390 // decreases to the non-capacity stats.
1391 class AllocationStats BASE_EMBEDDED {
1392  public:
1393   AllocationStats() { Clear(); }
1394 
1395   // Zero out all the allocation statistics (i.e., no capacity).
1396   void Clear() {
1397     capacity_ = 0;
1398     max_capacity_ = 0;
1399     size_ = 0;
1400     waste_ = 0;
1401   }
1402 
1403   void ClearSizeWaste() {
1404     size_ = capacity_;
1405     waste_ = 0;
1406   }
1407 
1408   // Reset the allocation statistics (i.e., available = capacity with no
1409   // wasted or allocated bytes).
1410   void Reset() {
1411     size_ = 0;
1412     waste_ = 0;
1413   }
1414 
1415   // Accessors for the allocation statistics.
1416   intptr_t Capacity() { return capacity_; }
1417   intptr_t MaxCapacity() { return max_capacity_; }
1418   intptr_t Size() { return size_; }
1419   intptr_t Waste() { return waste_; }
1420 
1421   // Grow the space by adding available bytes.  They are initially marked as
1422   // being in use (part of the size), but will normally be immediately freed,
1423   // putting them on the free list and removing them from size_.
1424   void ExpandSpace(int size_in_bytes) {
1425     capacity_ += size_in_bytes;
1426     size_ += size_in_bytes;
1427     if (capacity_ > max_capacity_) {
1428       max_capacity_ = capacity_;
1429     }
1430     ASSERT(size_ >= 0);
1431   }
1432 
1433   // Shrink the space by removing available bytes.  Since shrinking is done
1434   // during sweeping, bytes have been marked as being in use (part of the size)
1435   // and are hereby freed.
1436   void ShrinkSpace(int size_in_bytes) {
1437     capacity_ -= size_in_bytes;
1438     size_ -= size_in_bytes;
1439     ASSERT(size_ >= 0);
1440   }
1441 
1442   // Allocate from available bytes (available -> size).
1443   void AllocateBytes(intptr_t size_in_bytes) {
1444     size_ += size_in_bytes;
1445     ASSERT(size_ >= 0);
1446   }
1447 
1448   // Free allocated bytes, making them available (size -> available).
1449   void DeallocateBytes(intptr_t size_in_bytes) {
1450     size_ -= size_in_bytes;
1451     ASSERT(size_ >= 0);
1452   }
1453 
1454   // Waste free bytes (available -> waste).
1455   void WasteBytes(int size_in_bytes) {
1456     size_ -= size_in_bytes;
1457     waste_ += size_in_bytes;
1458     ASSERT(size_ >= 0);
1459   }
1460 
1461  private:
1462   intptr_t capacity_;
1463   intptr_t max_capacity_;
1464   intptr_t size_;
1465   intptr_t waste_;
1466 };
1467 
1468 
1469 // -----------------------------------------------------------------------------
1470 // Free lists for old object spaces
1471 //
1472 // Free-list nodes are free blocks in the heap.  They look like heap objects
1473 // (free-list node pointers have the heap object tag, and they have a map like
1474 // a heap object).  They have a size and a next pointer.  The next pointer is
1475 // the raw address of the next free list node (or NULL).
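// As an illustration only (assuming a 64-bit build and that
// FreeSpace::kHeaderSize covers the map word and the size field), a freed
// 64-byte block looks roughly like this in memory:
//
//   +0   map word   -- the free-space map written by set_size()
//   +8   size field -- encodes 64, so HeapObject::Size() reports the block size
//   +16  next       -- raw address of the next FreeListNode, or NULL
//   +24  ...        -- remaining bytes of the block, unused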
1476 class FreeListNode: public HeapObject {
1477  public:
1478   // Obtain a free-list node from a raw address.  This is not a cast because
1479   // it does not check nor require that the first word at the address is a map
1480   // pointer.
FromAddress(Address address)1481   static FreeListNode* FromAddress(Address address) {
1482     return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
1483   }
1484 
1485   static inline bool IsFreeListNode(HeapObject* object);
1486 
1487   // Set the size in bytes, which can be read with HeapObject::Size().  This
1488   // function also writes a map to the first word of the block so that it
1489   // looks like a heap object to the garbage collector and heap iteration
1490   // functions.
1491   void set_size(Heap* heap, int size_in_bytes);
1492 
1493   // Accessors for the next field.
1494   inline FreeListNode* next();
1495   inline FreeListNode** next_address();
1496   inline void set_next(FreeListNode* next);
1497 
1498   inline void Zap();
1499 
cast(Object * object)1500   static inline FreeListNode* cast(Object* object) {
1501     return reinterpret_cast<FreeListNode*>(object);
1502   }
1503 
1504  private:
1505   static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
1506 
1507   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
1508 };
1509 
1510 
1511 // The free list category holds a pointer to the top element and a pointer to
1512 // the end element of the linked list of free memory blocks.
1513 class FreeListCategory {
1514  public:
FreeListCategory()1515   FreeListCategory() :
1516       top_(0),
1517       end_(NULL),
1518       available_(0) {}
1519 
1520   intptr_t Concatenate(FreeListCategory* category);
1521 
1522   void Reset();
1523 
1524   void Free(FreeListNode* node, int size_in_bytes);
1525 
1526   FreeListNode* PickNodeFromList(int *node_size);
1527   FreeListNode* PickNodeFromList(int size_in_bytes, int *node_size);
1528 
1529   intptr_t EvictFreeListItemsInList(Page* p);
1530   bool ContainsPageFreeListItemsInList(Page* p);
1531 
1532   void RepairFreeList(Heap* heap);
1533 
1534   FreeListNode* top() const {
1535     return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
1536   }
1537 
1538   void set_top(FreeListNode* top) {
1539     base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
1540   }
1541 
1542   FreeListNode** GetEndAddress() { return &end_; }
1543   FreeListNode* end() const { return end_; }
1544   void set_end(FreeListNode* end) { end_ = end; }
1545 
1546   int* GetAvailableAddress() { return &available_; }
1547   int available() const { return available_; }
1548   void set_available(int available) { available_ = available; }
1549 
1550   Mutex* mutex() { return &mutex_; }
1551 
1552   bool IsEmpty() {
1553     return top() == 0;
1554   }
1555 
1556 #ifdef DEBUG
1557   intptr_t SumFreeList();
1558   int FreeListLength();
1559 #endif
1560 
1561  private:
1562   // top_ points to the top FreeListNode* in the free list category.
1563   base::AtomicWord top_;
1564   FreeListNode* end_;
1565   Mutex mutex_;
1566 
1567   // Total available bytes in all blocks of this free list category.
1568   int available_;
1569 };
1570 
1571 
1572 // The free list for the old space.  The free list is organized in such a way
1573 // as to encourage objects allocated around the same time to be near each
1574 // other.  The normal way to allocate is intended to be by bumping a 'top'
1575 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1576 // find a new space to allocate from.  This is done with the free list, which
1577 // is divided up into rough categories to cut down on waste.  Having finer
1578 // categories would scatter allocation more.
1579 
1580 // The old space free list is organized in categories.
1581 // 1-31 words:  Such small free areas are discarded for efficiency reasons.
1582 //     They can be reclaimed by the compactor.  However the distance between top
1583 //     and limit may be this small.
1584 // 32-255 words: There is a list of spaces this large.  It is used for top and
1585 //     limit when the object we need to allocate is 1-31 words in size.  These
1586 //     spaces are called small.
1587 // 256-2047 words: There is a list of spaces this large.  It is used for top and
1588 //     limit when the object we need to allocate is 32-255 words in size.  These
1589 //     spaces are called medium.
1590 // 2048-16383 words: There is a list of spaces this large.  It is used for top
1591 //     and limit when the object we need to allocate is 256-2047 words in size.
1592 //     These spaces are called large.
1593 // At least 16384 words.  This list is for objects of 2048 words or larger.
1594 //     Empty pages are added to this list.  These spaces are called huge.
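//
// For example, in terms of the constants defined below: a freed block of
// 300 words is put on the medium list (it is a 256-2047 word space), an
// allocation request of 40 words is also served from the medium list
// (requests of at most kSmallAllocationMax == 31 words use the small list),
// and a completely empty page ends up on the huge list.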
1595 class FreeList {
1596  public:
1597   explicit FreeList(PagedSpace* owner);
1598 
1599   intptr_t Concatenate(FreeList* free_list);
1600 
1601   // Clear the free list.
1602   void Reset();
1603 
1604   // Return the number of bytes available on the free list.
available()1605   intptr_t available() {
1606     return small_list_.available() + medium_list_.available() +
1607            large_list_.available() + huge_list_.available();
1608   }
1609 
1610   // Place a node on the free list.  The block of size 'size_in_bytes'
1611   // starting at 'start' is placed on the free list.  The return value is the
1612   // number of bytes that have been lost due to internal fragmentation by
1613   // freeing the block.  Bookkeeping information will be written to the block,
1614   // i.e., its contents will be destroyed.  The start address should be word
1615   // aligned, and the size should be a non-zero multiple of the word size.
1616   int Free(Address start, int size_in_bytes);
1617 
1618   // Allocate a block of size 'size_in_bytes' from the free list.  The block
1619   // is uninitialized.  A failure is returned if no block is available.  The
1620   // number of bytes lost to fragmentation is returned in the output parameter
1621   // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
1622   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1623 
IsEmpty()1624   bool IsEmpty() {
1625     return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
1626            large_list_.IsEmpty() && huge_list_.IsEmpty();
1627   }
1628 
1629 #ifdef DEBUG
1630   void Zap();
1631   intptr_t SumFreeLists();
1632   bool IsVeryLong();
1633 #endif
1634 
1635   // Used after booting the VM.
1636   void RepairLists(Heap* heap);
1637 
1638   intptr_t EvictFreeListItems(Page* p);
1639   bool ContainsPageFreeListItems(Page* p);
1640 
1641   FreeListCategory* small_list() { return &small_list_; }
1642   FreeListCategory* medium_list() { return &medium_list_; }
1643   FreeListCategory* large_list() { return &large_list_; }
1644   FreeListCategory* huge_list() { return &huge_list_; }
1645 
1646  private:
1647   // The size range of blocks, in bytes.
1648   static const int kMinBlockSize = 3 * kPointerSize;
1649   static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
1650 
1651   FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
1652 
1653   PagedSpace* owner_;
1654   Heap* heap_;
1655 
1656   static const int kSmallListMin = 0x20 * kPointerSize;
1657   static const int kSmallListMax = 0xff * kPointerSize;
1658   static const int kMediumListMax = 0x7ff * kPointerSize;
1659   static const int kLargeListMax = 0x3fff * kPointerSize;
1660   static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
1661   static const int kMediumAllocationMax = kSmallListMax;
1662   static const int kLargeAllocationMax = kMediumListMax;
1663   FreeListCategory small_list_;
1664   FreeListCategory medium_list_;
1665   FreeListCategory large_list_;
1666   FreeListCategory huge_list_;
1667 
1668   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1669 };
1670 
1671 
1672 class AllocationResult {
1673  public:
1674   // Implicit constructor from Object*.
AllocationResult(Object * object)1675   AllocationResult(Object* object) : object_(object),  // NOLINT
1676                                      retry_space_(INVALID_SPACE) { }
1677 
AllocationResult()1678   AllocationResult() : object_(NULL),
1679                        retry_space_(INVALID_SPACE) { }
1680 
1681   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
1682     return AllocationResult(space);
1683   }
1684 
IsRetry()1685   inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
1686 
1687   template <typename T>
To(T ** obj)1688   bool To(T** obj) {
1689     if (IsRetry()) return false;
1690     *obj = T::cast(object_);
1691     return true;
1692   }
1693 
ToObjectChecked()1694   Object* ToObjectChecked() {
1695     CHECK(!IsRetry());
1696     return object_;
1697   }
1698 
RetrySpace()1699   AllocationSpace RetrySpace() {
1700     ASSERT(IsRetry());
1701     return retry_space_;
1702   }
1703 
1704  private:
AllocationResult(AllocationSpace space)1705   explicit AllocationResult(AllocationSpace space) : object_(NULL),
1706                                                      retry_space_(space) { }
1707 
1708   Object* object_;
1709   AllocationSpace retry_space_;
1710 };
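// Illustrative caller pattern (hypothetical call site): allocation functions
// return an AllocationResult that is unpacked with To(), and RetrySpace()
// tells the caller which space to collect before trying again.
//
//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
//   HeapObject* object;
//   if (!allocation.To(&object)) {
//     // Retry case: collect garbage in allocation.RetrySpace(), then retry.
//   }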
1711 
1712 
1713 class PagedSpace : public Space {
1714  public:
1715   // Creates a space with a maximum capacity, and an id.
1716   PagedSpace(Heap* heap,
1717              intptr_t max_capacity,
1718              AllocationSpace id,
1719              Executability executable);
1720 
~PagedSpace()1721   virtual ~PagedSpace() {}
1722 
1723   // Set up the space using the given address range of virtual memory (from
1724   // the memory allocator's initial chunk) if possible.  If the block of
1725   // addresses is not big enough to contain a single page-aligned page, a
1726   // fresh chunk will be allocated.
1727   bool SetUp();
1728 
1729   // Returns true if the space has been successfully set up and not
1730   // subsequently torn down.
1731   bool HasBeenSetUp();
1732 
1733   // Cleans up the space, frees all pages in this space except those belonging
1734   // to the initial chunk, uncommits addresses in the initial chunk.
1735   void TearDown();
1736 
1737   // Checks whether an object/address is in this space.
1738   inline bool Contains(Address a);
Contains(HeapObject * o)1739   bool Contains(HeapObject* o) { return Contains(o->address()); }
1740 
1741   // Given an address occupied by a live object, return that object if it is
1742   // in this space, or a Smi if it is not.  The implementation iterates over
1743   // objects in the page containing the address, the cost is linear in the
1744   // number of objects in the page.  It may be slow.
1745   Object* FindObject(Address addr);
1746 
1747   // During boot the free_space_map is created, and afterwards we may need
1748   // to write it into the free list nodes that were already created.
1749   void RepairFreeListsAfterBoot();
1750 
1751   // Prepares for a mark-compact GC.
1752   void PrepareForMarkCompact();
1753 
1754   // Current capacity without growing (Size() + Available()).
Capacity()1755   intptr_t Capacity() { return accounting_stats_.Capacity(); }
1756 
1757   // Total amount of memory committed for this space.  For paged
1758   // spaces this equals the capacity.
CommittedMemory()1759   intptr_t CommittedMemory() { return Capacity(); }
1760 
1761   // The maximum amount of memory ever committed for this space.
MaximumCommittedMemory()1762   intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
1763 
1764   // Approximate amount of physical memory committed for this space.
1765   size_t CommittedPhysicalMemory();
1766 
1767   struct SizeStats {
1768     intptr_t Total() {
1769       return small_size_ + medium_size_ + large_size_ + huge_size_;
1770     }
1771 
1772     intptr_t small_size_;
1773     intptr_t medium_size_;
1774     intptr_t large_size_;
1775     intptr_t huge_size_;
1776   };
1777 
1778   void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
1779   void ResetFreeListStatistics();
1780 
1781   // Sets the capacity, the available space and the wasted space to zero.
1782   // The stats are rebuilt during sweeping by adding each page to the
1783   // capacity and the size when it is encountered.  As free spaces are
1784   // discovered during the sweeping they are subtracted from the size and added
1785   // to the available and wasted totals.
ClearStats()1786   void ClearStats() {
1787     accounting_stats_.ClearSizeWaste();
1788     ResetFreeListStatistics();
1789   }
1790 
1791   // Increases the number of available bytes of that space.
AddToAccountingStats(intptr_t bytes)1792   void AddToAccountingStats(intptr_t bytes) {
1793     accounting_stats_.DeallocateBytes(bytes);
1794   }
1795 
1796   // Available bytes without growing.  These are the bytes on the free list.
1797   // The bytes in the linear allocation area are not included in this total
1798   // because updating the stats would slow down allocation.  New pages are
1799   // immediately added to the free list so they show up here.
Available()1800   intptr_t Available() { return free_list_.available(); }
1801 
1802   // Allocated bytes in this space.  Garbage bytes that were not found due to
1803   // concurrent sweeping are counted as being allocated!  The bytes in the
1804   // current linear allocation area (between top and limit) are also counted
1805   // here.
Size()1806   virtual intptr_t Size() { return accounting_stats_.Size(); }
1807 
1808   // As size, but the bytes in lazily swept pages are estimated and the bytes
1809   // in the current linear allocation area are not included.
1810   virtual intptr_t SizeOfObjects();
1811 
1812   // Wasted bytes in this space.  These are just the bytes that were thrown away
1813   // due to being too small to use for allocation.  They do not include the
1814   // free bytes that were not found at all due to lazy sweeping.
Waste()1815   virtual intptr_t Waste() { return accounting_stats_.Waste(); }
1816 
1817   // Returns the allocation pointer in this space.
top()1818   Address top() { return allocation_info_.top(); }
limit()1819   Address limit() { return allocation_info_.limit(); }
1820 
1821   // The allocation top address.
allocation_top_address()1822   Address* allocation_top_address() {
1823     return allocation_info_.top_address();
1824   }
1825 
1826   // The allocation limit address.
allocation_limit_address()1827   Address* allocation_limit_address() {
1828     return allocation_info_.limit_address();
1829   }
1830 
1831   // Allocate the requested number of bytes in the space if possible, return a
1832   // failure object if not.
1833   MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
1834 
1835   // Give a block of memory to the space's free list.  It might be added to
1836   // the free list or accounted as waste.
1837   // If add_to_freelist is false then just accounting stats are updated and
1838   // no attempt to add area to free list is made.
Free(Address start,int size_in_bytes)1839   int Free(Address start, int size_in_bytes) {
1840     int wasted = free_list_.Free(start, size_in_bytes);
1841     accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
1842     return size_in_bytes - wasted;
1843   }
1844 
ResetFreeList()1845   void ResetFreeList() {
1846     free_list_.Reset();
1847   }
1848 
1849   // Set space allocation info.
SetTopAndLimit(Address top,Address limit)1850   void SetTopAndLimit(Address top, Address limit) {
1851     ASSERT(top == limit ||
1852            Page::FromAddress(top) == Page::FromAddress(limit - 1));
1853     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
1854     allocation_info_.set_top(top);
1855     allocation_info_.set_limit(limit);
1856   }
1857 
1858   // Empty space allocation info, returning unused area to free list.
EmptyAllocationInfo()1859   void EmptyAllocationInfo() {
1860     // Mark the old linear allocation area with a free space map so it can be
1861     // skipped when scanning the heap.
1862     int old_linear_size = static_cast<int>(limit() - top());
1863     Free(top(), old_linear_size);
1864     SetTopAndLimit(NULL, NULL);
1865   }
1866 
Allocate(int bytes)1867   void Allocate(int bytes) {
1868     accounting_stats_.AllocateBytes(bytes);
1869   }
1870 
1871   void IncreaseCapacity(int size);
1872 
1873   // Releases an unused page and shrinks the space.
1874   void ReleasePage(Page* page);
1875 
1876   // The dummy page that anchors the linked list of pages.
anchor()1877   Page* anchor() { return &anchor_; }
1878 
1879 #ifdef VERIFY_HEAP
1880   // Verify integrity of this space.
1881   virtual void Verify(ObjectVisitor* visitor);
1882 
1883   // Overridden by subclasses to verify space-specific object
1884   // properties (e.g., only maps or free-list nodes are in map space).
VerifyObject(HeapObject * obj)1885   virtual void VerifyObject(HeapObject* obj) {}
1886 #endif
1887 
1888 #ifdef DEBUG
1889   // Print meta info and objects in this space.
1890   virtual void Print();
1891 
1892   // Reports statistics for the space
1893   void ReportStatistics();
1894 
1895   // Report code object related statistics
1896   void CollectCodeStatistics();
1897   static void ReportCodeStatistics(Isolate* isolate);
1898   static void ResetCodeStatistics(Isolate* isolate);
1899 #endif
1900 
was_swept_conservatively()1901   bool was_swept_conservatively() { return was_swept_conservatively_; }
set_was_swept_conservatively(bool b)1902   void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
1903 
1904   // Evacuation candidates are swept by evacuator.  Needs to return a valid
1905   // result before _and_ after evacuation has finished.
ShouldBeSweptBySweeperThreads(Page * p)1906   static bool ShouldBeSweptBySweeperThreads(Page* p) {
1907     return !p->IsEvacuationCandidate() &&
1908            !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
1909            !p->WasSweptPrecisely();
1910   }
1911 
IncrementUnsweptFreeBytes(intptr_t by)1912   void IncrementUnsweptFreeBytes(intptr_t by) {
1913     unswept_free_bytes_ += by;
1914   }
1915 
IncreaseUnsweptFreeBytes(Page * p)1916   void IncreaseUnsweptFreeBytes(Page* p) {
1917     ASSERT(ShouldBeSweptBySweeperThreads(p));
1918     unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
1919   }
1920 
DecrementUnsweptFreeBytes(intptr_t by)1921   void DecrementUnsweptFreeBytes(intptr_t by) {
1922     unswept_free_bytes_ -= by;
1923   }
1924 
DecreaseUnsweptFreeBytes(Page * p)1925   void DecreaseUnsweptFreeBytes(Page* p) {
1926     ASSERT(ShouldBeSweptBySweeperThreads(p));
1927     unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
1928   }
1929 
ResetUnsweptFreeBytes()1930   void ResetUnsweptFreeBytes() {
1931     unswept_free_bytes_ = 0;
1932   }
1933 
1934   // This function tries to steal size_in_bytes memory from the sweeper threads
1935   // free-lists. If it does not succeed stealing enough memory, it will wait
1936   // for the sweeper threads to finish sweeping.
1937   // It returns true when sweeping is completed and false otherwise.
1938   bool EnsureSweeperProgress(intptr_t size_in_bytes);
1939 
set_end_of_unswept_pages(Page * page)1940   void set_end_of_unswept_pages(Page* page) {
1941     end_of_unswept_pages_ = page;
1942   }
1943 
end_of_unswept_pages()1944   Page* end_of_unswept_pages() {
1945     return end_of_unswept_pages_;
1946   }
1947 
FirstPage()1948   Page* FirstPage() { return anchor_.next_page(); }
LastPage()1949   Page* LastPage() { return anchor_.prev_page(); }
1950 
1951   void EvictEvacuationCandidatesFromFreeLists();
1952 
1953   bool CanExpand();
1954 
1955   // Returns the number of total pages in this space.
1956   int CountTotalPages();
1957 
1958   // Return size of allocatable area on a page in this space.
AreaSize()1959   inline int AreaSize() {
1960     return area_size_;
1961   }
1962 
1963  protected:
free_list()1964   FreeList* free_list() { return &free_list_; }
1965 
1966   int area_size_;
1967 
1968   // Maximum capacity of this space.
1969   intptr_t max_capacity_;
1970 
1971   intptr_t SizeOfFirstPage();
1972 
1973   // Accounting information for this space.
1974   AllocationStats accounting_stats_;
1975 
1976   // The dummy page that anchors the double linked list of pages.
1977   Page anchor_;
1978 
1979   // The space's free list.
1980   FreeList free_list_;
1981 
1982   // Normal allocation information.
1983   AllocationInfo allocation_info_;
1984 
1985   bool was_swept_conservatively_;
1986 
1987   // The number of free bytes which could be reclaimed by advancing the
1988   // concurrent sweeper threads.  This is only an estimation because concurrent
1989   // sweeping is done conservatively.
1990   intptr_t unswept_free_bytes_;
1991 
1992   // The sweeper threads iterate over the list of pointer and data space pages
1993   // and sweep these pages concurrently. They will stop sweeping after the
1994   // end_of_unswept_pages_ page.
1995   Page* end_of_unswept_pages_;
1996 
1997   // Expands the space by allocating a fixed number of pages. Returns false if
1998   // it cannot allocate requested number of pages from OS, or if the hard heap
1999   // size limit has been hit.
2000   bool Expand();
2001 
2002   // Generic fast case allocation function that tries linear allocation at the
2003   // address denoted by top in allocation_info_.
2004   inline HeapObject* AllocateLinearly(int size_in_bytes);
2005 
2006   MUST_USE_RESULT HeapObject*
2007       WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
2008 
2009   // Slow path of AllocateRaw.  This function is space-dependent.
2010   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2011 
2012   friend class PageIterator;
2013   friend class MarkCompactCollector;
2014 };
2015 
2016 
2017 class NumberAndSizeInfo BASE_EMBEDDED {
2018  public:
NumberAndSizeInfo()2019   NumberAndSizeInfo() : number_(0), bytes_(0) {}
2020 
number()2021   int number() const { return number_; }
increment_number(int num)2022   void increment_number(int num) { number_ += num; }
2023 
bytes()2024   int bytes() const { return bytes_; }
increment_bytes(int size)2025   void increment_bytes(int size) { bytes_ += size; }
2026 
clear()2027   void clear() {
2028     number_ = 0;
2029     bytes_ = 0;
2030   }
2031 
2032  private:
2033   int number_;
2034   int bytes_;
2035 };
2036 
2037 
2038 // HistogramInfo class for recording a single "bar" of a histogram.  This
2039 // class is used for collecting statistics to print to the log file.
2040 class HistogramInfo: public NumberAndSizeInfo {
2041  public:
HistogramInfo()2042   HistogramInfo() : NumberAndSizeInfo() {}
2043 
name()2044   const char* name() { return name_; }
set_name(const char * name)2045   void set_name(const char* name) { name_ = name; }
2046 
2047  private:
2048   const char* name_;
2049 };
2050 
2051 
2052 enum SemiSpaceId {
2053   kFromSpace = 0,
2054   kToSpace = 1
2055 };
2056 
2057 
2058 class SemiSpace;
2059 
2060 
2061 class NewSpacePage : public MemoryChunk {
2062  public:
2063   // GC related flags copied from from-space to to-space when
2064   // flipping semispaces.
2065   static const intptr_t kCopyOnFlipFlagsMask =
2066     (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
2067     (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
2068     (1 << MemoryChunk::SCAN_ON_SCAVENGE);
2069 
2070   static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
2071 
2072   inline NewSpacePage* next_page() const {
2073     return static_cast<NewSpacePage*>(next_chunk());
2074   }
2075 
2076   inline void set_next_page(NewSpacePage* page) {
2077     set_next_chunk(page);
2078   }
2079 
2080   inline NewSpacePage* prev_page() const {
2081     return static_cast<NewSpacePage*>(prev_chunk());
2082   }
2083 
2084   inline void set_prev_page(NewSpacePage* page) {
2085     set_prev_chunk(page);
2086   }
2087 
2088   SemiSpace* semi_space() {
2089     return reinterpret_cast<SemiSpace*>(owner());
2090   }
2091 
2092   bool is_anchor() { return !this->InNewSpace(); }
2093 
IsAtStart(Address addr)2094   static bool IsAtStart(Address addr) {
2095     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
2096         == kObjectStartOffset;
2097   }
2098 
IsAtEnd(Address addr)2099   static bool IsAtEnd(Address addr) {
2100     return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
2101   }
2102 
address()2103   Address address() {
2104     return reinterpret_cast<Address>(this);
2105   }
2106 
2107   // Finds the NewSpacePage containing the given address.
FromAddress(Address address_in_page)2108   static inline NewSpacePage* FromAddress(Address address_in_page) {
2109     Address page_start =
2110         reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
2111                                   ~Page::kPageAlignmentMask);
2112     NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
2113     return page;
2114   }
2115 
2116   // Find the page for a limit address. A limit address is either an address
2117   // inside a page, or the address right after the last byte of a page.
FromLimit(Address address_limit)2118   static inline NewSpacePage* FromLimit(Address address_limit) {
2119     return NewSpacePage::FromAddress(address_limit - 1);
2120   }
2121 
2122   // Checks if address1 and address2 are on the same new space page.
OnSamePage(Address address1,Address address2)2123   static inline bool OnSamePage(Address address1, Address address2) {
2124     return NewSpacePage::FromAddress(address1) ==
2125            NewSpacePage::FromAddress(address2);
2126   }
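  // For illustration, with a 1 MB page size (so kPageAlignmentMask == 0xFFFFF):
  //   FromAddress(0x12345678) yields the page starting at 0x12300000,
  //   FromLimit(0x12400000) yields that same page (the limit is first moved
  //   back by one byte), and IsAtEnd(0x12400000) is true because the low
  //   bits are all zero.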
2127 
2128  private:
2129   // Create a NewSpacePage object that is only used as anchor
2130   // for the doubly-linked list of real pages.
NewSpacePage(SemiSpace * owner)2131   explicit NewSpacePage(SemiSpace* owner) {
2132     InitializeAsAnchor(owner);
2133   }
2134 
2135   static NewSpacePage* Initialize(Heap* heap,
2136                                   Address start,
2137                                   SemiSpace* semi_space);
2138 
2139   // Initialize a fake NewSpacePage used as sentinel at the ends
2140   // of a doubly-linked list of real NewSpacePages.
2141   // Only uses the prev/next links, and sets flags to not be in new-space.
2142   void InitializeAsAnchor(SemiSpace* owner);
2143 
2144   friend class SemiSpace;
2145   friend class SemiSpaceIterator;
2146 };
2147 
2148 
2149 // -----------------------------------------------------------------------------
2150 // SemiSpace in young generation
2151 //
2152 // A semispace is a contiguous chunk of memory holding page-like memory
2153 // chunks. The mark-compact collector  uses the memory of the first page in
2154 // the from space as a marking stack when tracing live objects.
2155 
2156 class SemiSpace : public Space {
2157  public:
2158   // Constructor.
SemiSpace(Heap * heap,SemiSpaceId semispace)2159   SemiSpace(Heap* heap, SemiSpaceId semispace)
2160     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2161       start_(NULL),
2162       age_mark_(NULL),
2163       id_(semispace),
2164       anchor_(this),
2165       current_page_(NULL) { }
2166 
2167   // Sets up the semispace using the given chunk.
2168   void SetUp(Address start, int initial_capacity, int maximum_capacity);
2169 
2170   // Tear down the space.  Heap memory was not allocated by the space, so it
2171   // is not deallocated here.
2172   void TearDown();
2173 
2174   // True if the space has been set up but not torn down.
HasBeenSetUp()2175   bool HasBeenSetUp() { return start_ != NULL; }
2176 
2177   // Grow the semispace to the new capacity.  The new capacity
2178   // requested must be larger than the current capacity and less than
2179   // the maximum capacity.
2180   bool GrowTo(int new_capacity);
2181 
2182   // Shrinks the semispace to the new capacity.  The new capacity
2183   // requested must be more than the amount of used memory in the
2184   // semispace and less than the current capacity.
2185   bool ShrinkTo(int new_capacity);
2186 
2187   // Returns the start address of the first page of the space.
space_start()2188   Address space_start() {
2189     ASSERT(anchor_.next_page() != &anchor_);
2190     return anchor_.next_page()->area_start();
2191   }
2192 
2193   // Returns the start address of the current page of the space.
page_low()2194   Address page_low() {
2195     return current_page_->area_start();
2196   }
2197 
2198   // Returns one past the end address of the space.
space_end()2199   Address space_end() {
2200     return anchor_.prev_page()->area_end();
2201   }
2202 
2203   // Returns one past the end address of the current page of the space.
page_high()2204   Address page_high() {
2205     return current_page_->area_end();
2206   }
2207 
AdvancePage()2208   bool AdvancePage() {
2209     NewSpacePage* next_page = current_page_->next_page();
2210     if (next_page == anchor()) return false;
2211     current_page_ = next_page;
2212     return true;
2213   }
2214 
2215   // Resets the space to using the first page.
2216   void Reset();
2217 
2218   // Age mark accessors.
age_mark()2219   Address age_mark() { return age_mark_; }
2220   void set_age_mark(Address mark);
2221 
2222   // True if the address is in the address range of this semispace (not
2223   // necessarily below the allocation pointer).
Contains(Address a)2224   bool Contains(Address a) {
2225     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2226            == reinterpret_cast<uintptr_t>(start_);
2227   }
2228 
2229   // True if the object is a heap object in the address range of this
2230   // semispace (not necessarily below the allocation pointer).
Contains(Object * o)2231   bool Contains(Object* o) {
2232     return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
2233   }
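  // For illustration, if this semispace starts at 0x3e800000 with an 8 MB
  // maximum capacity (so address_mask_ == ~0x7FFFFF), then
  // Contains(0x3e912345) is true because 0x3e912345 & ~0x7FFFFF == 0x3e800000,
  // while Contains(0x3f812345) is false (it masks to 0x3f800000).  The
  // Object* overload does the analogous check via object_mask_ and
  // object_expected_.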
2234 
2235   // If we don't have these here then SemiSpace will be abstract.  However
2236   // they should never be called.
Size()2237   virtual intptr_t Size() {
2238     UNREACHABLE();
2239     return 0;
2240   }
2241 
is_committed()2242   bool is_committed() { return committed_; }
2243   bool Commit();
2244   bool Uncommit();
2245 
first_page()2246   NewSpacePage* first_page() { return anchor_.next_page(); }
current_page()2247   NewSpacePage* current_page() { return current_page_; }
2248 
2249 #ifdef VERIFY_HEAP
2250   virtual void Verify();
2251 #endif
2252 
2253 #ifdef DEBUG
2254   virtual void Print();
2255   // Validate a range of addresses in a SemiSpace.
2256   // The "from" address must be on a page prior to the "to" address,
2257   // in the linked page order, or it must be earlier on the same page.
2258   static void AssertValidRange(Address from, Address to);
2259 #else
2260   // Do nothing.
AssertValidRange(Address from,Address to)2261   inline static void AssertValidRange(Address from, Address to) {}
2262 #endif
2263 
2264   // Returns the current capacity of the semi space.
Capacity()2265   int Capacity() { return capacity_; }
2266 
2267   // Returns the maximum capacity of the semi space.
MaximumCapacity()2268   int MaximumCapacity() { return maximum_capacity_; }
2269 
2270   // Returns the initial capacity of the semi space.
InitialCapacity()2271   int InitialCapacity() { return initial_capacity_; }
2272 
id()2273   SemiSpaceId id() { return id_; }
2274 
2275   static void Swap(SemiSpace* from, SemiSpace* to);
2276 
2277   // Returns the maximum amount of memory ever committed by the semi space.
MaximumCommittedMemory()2278   size_t MaximumCommittedMemory() { return maximum_committed_; }
2279 
2280   // Approximate amount of physical memory committed for this space.
2281   size_t CommittedPhysicalMemory();
2282 
2283  private:
2284   // Flips the semispace between being from-space and to-space.
2285   // Copies the flags into the masked positions on all pages in the space.
2286   void FlipPages(intptr_t flags, intptr_t flag_mask);
2287 
2288   // Updates Capacity and MaximumCommitted based on new capacity.
2289   void SetCapacity(int new_capacity);
2290 
anchor()2291   NewSpacePage* anchor() { return &anchor_; }
2292 
2293   // The current and maximum capacity of the space.
2294   int capacity_;
2295   int maximum_capacity_;
2296   int initial_capacity_;
2297 
2298   intptr_t maximum_committed_;
2299 
2300   // The start address of the space.
2301   Address start_;
2302   // Used to govern object promotion during mark-compact collection.
2303   Address age_mark_;
2304 
2305   // Masks and comparison values to test for containment in this semispace.
2306   uintptr_t address_mask_;
2307   uintptr_t object_mask_;
2308   uintptr_t object_expected_;
2309 
2310   bool committed_;
2311   SemiSpaceId id_;
2312 
2313   NewSpacePage anchor_;
2314   NewSpacePage* current_page_;
2315 
2316   friend class SemiSpaceIterator;
2317   friend class NewSpacePageIterator;
2318  public:
2319   TRACK_MEMORY("SemiSpace")
2320 };
2321 
2322 
2323 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2324 // semispace of the heap's new space.  It iterates over the objects in the
2325 // semispace from a given start address (defaulting to the bottom of the
2326 // semispace) to the top of the semispace.  New objects allocated after the
2327 // iterator is created are not iterated.
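//
// Illustrative use (hypothetical call site):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
//     // ... visit 'object' ...
//   }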
2328 class SemiSpaceIterator : public ObjectIterator {
2329  public:
2330   // Create an iterator over the objects in the given space.  If no start
2331   // address is given, the iterator starts from the bottom of the space.  If
2332   // no size function is given, the iterator calls Object::Size().
2333 
2334   // Iterate over all of allocated to-space.
2335   explicit SemiSpaceIterator(NewSpace* space);
2336   // Iterate over all of allocated to-space, with a custom size function.
2337   SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
2338   // Iterate over part of allocated to-space, from start to the end
2339   // of allocation.
2340   SemiSpaceIterator(NewSpace* space, Address start);
2341   // Iterate from one address to another in the same semi-space.
2342   SemiSpaceIterator(Address from, Address to);
2343 
Next()2344   HeapObject* Next() {
2345     if (current_ == limit_) return NULL;
2346     if (NewSpacePage::IsAtEnd(current_)) {
2347       NewSpacePage* page = NewSpacePage::FromLimit(current_);
2348       page = page->next_page();
2349       ASSERT(!page->is_anchor());
2350       current_ = page->area_start();
2351       if (current_ == limit_) return NULL;
2352     }
2353 
2354     HeapObject* object = HeapObject::FromAddress(current_);
2355     int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
2356 
2357     current_ += size;
2358     return object;
2359   }
2360 
2361   // Implementation of the ObjectIterator functions.
next_object()2362   virtual HeapObject* next_object() { return Next(); }
2363 
2364  private:
2365   void Initialize(Address start,
2366                   Address end,
2367                   HeapObjectCallback size_func);
2368 
2369   // The current iteration point.
2370   Address current_;
2371   // The end of iteration.
2372   Address limit_;
2373   // The callback function.
2374   HeapObjectCallback size_func_;
2375 };
2376 
2377 
2378 // -----------------------------------------------------------------------------
2379 // A PageIterator iterates the pages in a semi-space.
2380 class NewSpacePageIterator BASE_EMBEDDED {
2381  public:
2382   // Make an iterator that runs over all pages in to-space.
2383   explicit inline NewSpacePageIterator(NewSpace* space);
2384 
2385   // Make an iterator that runs over all pages in the given semispace,
2386   // even those not used in allocation.
2387   explicit inline NewSpacePageIterator(SemiSpace* space);
2388 
2389   // Make iterator that iterates from the page containing start
2390   // to the page that contains limit in the same semispace.
2391   inline NewSpacePageIterator(Address start, Address limit);
2392 
2393   inline bool has_next();
2394   inline NewSpacePage* next();
2395 
2396  private:
2397   NewSpacePage* prev_page_;  // Previous page returned.
2398   // Next page that will be returned.  Cached here so that we can use this
2399   // iterator for operations that deallocate pages.
2400   NewSpacePage* next_page_;
2401   // Last page returned.
2402   NewSpacePage* last_page_;
2403 };
2404 
2405 
2406 // -----------------------------------------------------------------------------
2407 // The young generation space.
2408 //
2409 // The new space consists of a contiguous pair of semispaces.  It simply
2410 // forwards most functions to the appropriate semispace.
2411 
2412 class NewSpace : public Space {
2413  public:
2414   // Constructor.
NewSpace(Heap * heap)2415   explicit NewSpace(Heap* heap)
2416     : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2417       to_space_(heap, kToSpace),
2418       from_space_(heap, kFromSpace),
2419       reservation_(),
2420       inline_allocation_limit_step_(0) {}
2421 
2422   // Sets up the new space using the given chunk.
2423   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
2424 
2425   // Tears down the space.  Heap memory was not allocated by the space, so it
2426   // is not deallocated here.
2427   void TearDown();
2428 
2429   // True if the space has been set up but not torn down.
HasBeenSetUp()2430   bool HasBeenSetUp() {
2431     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2432   }
2433 
2434   // Flip the pair of spaces.
2435   void Flip();
2436 
2437   // Grow the capacity of the semispaces.  Assumes that they are not at
2438   // their maximum capacity.
2439   void Grow();
2440 
2441   // Shrink the capacity of the semispaces.
2442   void Shrink();
2443 
2444   // True if the address or object lies in the address range of either
2445   // semispace (not necessarily below the allocation pointer).
Contains(Address a)2446   bool Contains(Address a) {
2447     return (reinterpret_cast<uintptr_t>(a) & address_mask_)
2448         == reinterpret_cast<uintptr_t>(start_);
2449   }
2450 
Contains(Object * o)2451   bool Contains(Object* o) {
2452     Address a = reinterpret_cast<Address>(o);
2453     return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
2454   }
2455 
2456   // Return the allocated bytes in the active semispace.
Size()2457   virtual intptr_t Size() {
2458     return pages_used_ * NewSpacePage::kAreaSize +
2459         static_cast<int>(top() - to_space_.page_low());
2460   }
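  // For example, three fully used pages plus 0x1000 bytes allocated on the
  // current page give Size() == 3 * NewSpacePage::kAreaSize + 0x1000.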
2461 
2462   // The same, but returning an int.  We have to have the one that returns
2463   // intptr_t because it is inherited, but when we know we are dealing with the
2464   // new space, which cannot get as big as the other spaces, this is useful:
2465   int SizeAsInt() { return static_cast<int>(Size()); }
2466 
2467   // Return the current capacity of a semispace.
EffectiveCapacity()2468   intptr_t EffectiveCapacity() {
2469     SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
2470     return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
2471   }
2472 
2473   // Return the current capacity of a semispace.
Capacity()2474   intptr_t Capacity() {
2475     ASSERT(to_space_.Capacity() == from_space_.Capacity());
2476     return to_space_.Capacity();
2477   }
2478 
2479   // Return the total amount of memory committed for new space.
CommittedMemory()2480   intptr_t CommittedMemory() {
2481     if (from_space_.is_committed()) return 2 * Capacity();
2482     return Capacity();
2483   }
2484 
2485   // Return the total amount of memory committed for new space.
MaximumCommittedMemory()2486   intptr_t MaximumCommittedMemory() {
2487     return to_space_.MaximumCommittedMemory() +
2488         from_space_.MaximumCommittedMemory();
2489   }
2490 
2491   // Approximate amount of physical memory committed for this space.
2492   size_t CommittedPhysicalMemory();
2493 
2494   // Return the available bytes without growing.
Available()2495   intptr_t Available() {
2496     return Capacity() - Size();
2497   }
2498 
2499   // Return the maximum capacity of a semispace.
MaximumCapacity()2500   int MaximumCapacity() {
2501     ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
2502     return to_space_.MaximumCapacity();
2503   }
2504 
IsAtMaximumCapacity()2505   bool IsAtMaximumCapacity() {
2506     return Capacity() == MaximumCapacity();
2507   }
2508 
2509   // Returns the initial capacity of a semispace.
InitialCapacity()2510   int InitialCapacity() {
2511     ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
2512     return to_space_.InitialCapacity();
2513   }
2514 
2515   // Return the address of the allocation pointer in the active semispace.
top()2516   Address top() {
2517     ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2518     return allocation_info_.top();
2519   }
2520 
set_top(Address top)2521   void set_top(Address top) {
2522     ASSERT(to_space_.current_page()->ContainsLimit(top));
2523     allocation_info_.set_top(top);
2524   }
2525 
2526   // Return the address of the allocation pointer limit in the active semispace.
limit()2527   Address limit() {
2528     ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2529     return allocation_info_.limit();
2530   }
2531 
2532   // Return the address of the first object in the active semispace.
bottom()2533   Address bottom() { return to_space_.space_start(); }
2534 
2535   // Get the age mark of the inactive semispace.
age_mark()2536   Address age_mark() { return from_space_.age_mark(); }
2537   // Set the age mark in the active semispace.
set_age_mark(Address mark)2538   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2539 
2540   // The start address of the space and a bit mask. Anding an address in the
2541   // new space with the mask will result in the start address.
start()2542   Address start() { return start_; }
mask()2543   uintptr_t mask() { return address_mask_; }
2544 
2545   INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
2546     ASSERT(Contains(addr));
2547     ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
2548            IsAligned(OffsetFrom(addr) - 1, kPointerSize));
2549     return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
2550   }
2551 
2552   INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
2553     return reinterpret_cast<Address>(index << kPointerSizeLog2);
2554   }
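  // For illustration, on a 64-bit build (kPointerSizeLog2 == 3) an address
  // 0x40 bytes above start_ maps to markbit index 0x40 >> 3 == 8.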
2555 
2556   // The allocation top address.
allocation_top_address()2557   Address* allocation_top_address() {
2558     return allocation_info_.top_address();
2559   }
2560 
2561   // The allocation limit address.
allocation_limit_address()2562   Address* allocation_limit_address() {
2563     return allocation_info_.limit_address();
2564   }
2565 
2566   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
2567 
2568   // Reset the allocation pointer to the beginning of the active semispace.
2569   void ResetAllocationInfo();
2570 
2571   void UpdateInlineAllocationLimit(int size_in_bytes);
LowerInlineAllocationLimit(intptr_t step)2572   void LowerInlineAllocationLimit(intptr_t step) {
2573     inline_allocation_limit_step_ = step;
2574     UpdateInlineAllocationLimit(0);
2575     top_on_previous_step_ = allocation_info_.top();
2576   }
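  // Illustrative effect: LowerInlineAllocationLimit(1024) keeps the limit
  // roughly 1 KB above the top, so inline allocation in generated code drops
  // into the runtime about once per KB allocated, which is what gives
  // incremental marking its steps (see inline_allocation_limit_step_ below).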
2577 
2578   // Get the extent of the inactive semispace (for use as a marking stack,
2579   // or to zap it). Notice: space-addresses are not necessarily on the
2580   // same page, so FromSpaceStart() might be above FromSpaceEnd().
FromSpacePageLow()2581   Address FromSpacePageLow() { return from_space_.page_low(); }
FromSpacePageHigh()2582   Address FromSpacePageHigh() { return from_space_.page_high(); }
FromSpaceStart()2583   Address FromSpaceStart() { return from_space_.space_start(); }
FromSpaceEnd()2584   Address FromSpaceEnd() { return from_space_.space_end(); }
2585 
2586   // Get the extent of the active semispace's pages' memory.
ToSpaceStart()2587   Address ToSpaceStart() { return to_space_.space_start(); }
ToSpaceEnd()2588   Address ToSpaceEnd() { return to_space_.space_end(); }
2589 
ToSpaceContains(Address address)2590   inline bool ToSpaceContains(Address address) {
2591     return to_space_.Contains(address);
2592   }
FromSpaceContains(Address address)2593   inline bool FromSpaceContains(Address address) {
2594     return from_space_.Contains(address);
2595   }
2596 
2597   // True if the object is a heap object in the address range of the
2598   // respective semispace (not necessarily below the allocation pointer of the
2599   // semispace).
ToSpaceContains(Object * o)2600   inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
FromSpaceContains(Object * o)2601   inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
2602 
2603   // Try to switch the active semispace to a new, empty, page.
2604   // Returns false if this isn't possible or reasonable (i.e., there
2605   // are no pages, or the current page is already empty), or true
2606   // if successful.
2607   bool AddFreshPage();
2608 
2609 #ifdef VERIFY_HEAP
2610   // Verify the active semispace.
2611   virtual void Verify();
2612 #endif
2613 
2614 #ifdef DEBUG
2615   // Print the active semispace.
Print()2616   virtual void Print() { to_space_.Print(); }
2617 #endif
2618 
2619   // Iterates the active semispace to collect statistics.
2620   void CollectStatistics();
2621   // Reports previously collected statistics of the active semispace.
2622   void ReportStatistics();
2623   // Clears previously collected statistics.
2624   void ClearHistograms();
2625 
2626   // Record the allocation or promotion of a heap object.  Note that we don't
2627   // record every single allocation, but only those that happen in the
2628   // to space during a scavenge GC.
2629   void RecordAllocation(HeapObject* obj);
2630   void RecordPromotion(HeapObject* obj);
2631 
2632   // Return whether the operation succeeded.
CommitFromSpaceIfNeeded()2633   bool CommitFromSpaceIfNeeded() {
2634     if (from_space_.is_committed()) return true;
2635     return from_space_.Commit();
2636   }
2637 
UncommitFromSpace()2638   bool UncommitFromSpace() {
2639     if (!from_space_.is_committed()) return true;
2640     return from_space_.Uncommit();
2641   }
2642 
inline_allocation_limit_step()2643   inline intptr_t inline_allocation_limit_step() {
2644     return inline_allocation_limit_step_;
2645   }
2646 
active_space()2647   SemiSpace* active_space() { return &to_space_; }
2648 
2649  private:
2650   // Update allocation info to match the current to-space page.
2651   void UpdateAllocationInfo();
2652 
2653   Address chunk_base_;
2654   uintptr_t chunk_size_;
2655 
2656   // The semispaces.
2657   SemiSpace to_space_;
2658   SemiSpace from_space_;
2659   VirtualMemory reservation_;
2660   int pages_used_;
2661 
2662   // Start address and bit mask for containment testing.
2663   Address start_;
2664   uintptr_t address_mask_;
2665   uintptr_t object_mask_;
2666   uintptr_t object_expected_;
2667 
2668   // Allocation pointer and limit for normal allocation and allocation during
2669   // mark-compact collection.
2670   AllocationInfo allocation_info_;
2671 
2672   // When incremental marking is active we will set allocation_info_.limit
2673   // to be lower than actual limit and then will gradually increase it
2674   // in steps to guarantee that we do incremental marking steps even
2675   // when all allocation is performed from inlined generated code.
2676   intptr_t inline_allocation_limit_step_;
2677 
2678   Address top_on_previous_step_;
2679 
2680   HistogramInfo* allocated_histogram_;
2681   HistogramInfo* promoted_histogram_;
2682 
2683   MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
2684 
2685   friend class SemiSpaceIterator;
2686 
2687  public:
2688   TRACK_MEMORY("NewSpace")
2689 };
2690 
2691 
2692 // -----------------------------------------------------------------------------
2693 // Old object space (excluding map objects)
2694 
2695 class OldSpace : public PagedSpace {
2696  public:
2697   // Creates an old space object with a given maximum capacity.
2698   // The constructor does not allocate pages from OS.
OldSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id,Executability executable)2699   OldSpace(Heap* heap,
2700            intptr_t max_capacity,
2701            AllocationSpace id,
2702            Executability executable)
2703       : PagedSpace(heap, max_capacity, id, executable) {
2704   }
2705 
2706  public:
2707   TRACK_MEMORY("OldSpace")
2708 };
2709 
2710 
2711 // For contiguous spaces, top should be in the space (or at the end) and limit
2712 // should be the end of the space.
2713 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
2714   SLOW_ASSERT((space).page_low() <= (info).top() \
2715               && (info).top() <= (space).page_high() \
2716               && (info).limit() <= (space).page_high())
2717 
2718 
2719 // -----------------------------------------------------------------------------
2720 // Old space for all map objects
2721 
2722 class MapSpace : public PagedSpace {
2723  public:
2724   // Creates a map space object with a maximum capacity.
MapSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2725   MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2726       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
2727         max_map_space_pages_(kMaxMapPageIndex - 1) {
2728   }
2729 
2730   // Given an index, returns the page address.
2731   // TODO(1600): this limit is artificial just to keep code compilable
2732   static const int kMaxMapPageIndex = 1 << 16;
2733 
RoundSizeDownToObjectAlignment(int size)2734   virtual int RoundSizeDownToObjectAlignment(int size) {
2735     if (IsPowerOf2(Map::kSize)) {
2736       return RoundDown(size, Map::kSize);
2737     } else {
2738       return (size / Map::kSize) * Map::kSize;
2739     }
2740   }
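  // For example, if Map::kSize were 88 bytes (not a power of two), then
  // RoundSizeDownToObjectAlignment(1000) == (1000 / 88) * 88 == 968; for a
  // power-of-two Map::kSize the cheaper RoundDown() path is used instead.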
2741 
2742  protected:
2743   virtual void VerifyObject(HeapObject* obj);
2744 
2745  private:
2746   static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
2747 
2748   // Do map space compaction if there is a page gap.
CompactionThreshold()2749   int CompactionThreshold() {
2750     return kMapsPerPage * (max_map_space_pages_ - 1);
2751   }
2752 
2753   const int max_map_space_pages_;
2754 
2755  public:
2756   TRACK_MEMORY("MapSpace")
2757 };
2758 
2759 
2760 // -----------------------------------------------------------------------------
2761 // Old space for simple property cell objects
2762 
2763 class CellSpace : public PagedSpace {
2764  public:
2765   // Creates a property cell space object with a maximum capacity.
CellSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2766   CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
2767       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2768   }
2769 
RoundSizeDownToObjectAlignment(int size)2770   virtual int RoundSizeDownToObjectAlignment(int size) {
2771     if (IsPowerOf2(Cell::kSize)) {
2772       return RoundDown(size, Cell::kSize);
2773     } else {
2774       return (size / Cell::kSize) * Cell::kSize;
2775     }
2776   }
2777 
2778  protected:
2779   virtual void VerifyObject(HeapObject* obj);
2780 
2781  public:
2782   TRACK_MEMORY("CellSpace")
2783 };
2784 
2785 
2786 // -----------------------------------------------------------------------------
2787 // Old space for all global object property cell objects
2788 
2789 class PropertyCellSpace : public PagedSpace {
2790  public:
2791   // Creates a property cell space object with a maximum capacity.
PropertyCellSpace(Heap * heap,intptr_t max_capacity,AllocationSpace id)2792   PropertyCellSpace(Heap* heap, intptr_t max_capacity,
2793                     AllocationSpace id)
2794       : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
2795   }
2796 
RoundSizeDownToObjectAlignment(int size)2797   virtual int RoundSizeDownToObjectAlignment(int size) {
2798     if (IsPowerOf2(PropertyCell::kSize)) {
2799       return RoundDown(size, PropertyCell::kSize);
2800     } else {
2801       return (size / PropertyCell::kSize) * PropertyCell::kSize;
2802     }
2803   }
2804 
2805  protected:
2806   virtual void VerifyObject(HeapObject* obj);
2807 
2808  public:
2809   TRACK_MEMORY("PropertyCellSpace")
2810 };
2811 
2812 
2813 // -----------------------------------------------------------------------------
2814 // Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
2815 // the large object space. A large object is allocated from OS heap with
2816 // extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2817 // A large object always starts at offset Page::kObjectStartOffset within a page.
2818 // Large objects do not move during garbage collections.
2819 
2820 class LargeObjectSpace : public Space {
2821  public:
2822   LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
~LargeObjectSpace()2823   virtual ~LargeObjectSpace() {}
2824 
2825   // Initializes internal data structures.
2826   bool SetUp();
2827 
2828   // Releases internal resources, frees objects in this space.
2829   void TearDown();
2830 
ObjectSizeFor(intptr_t chunk_size)2831   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
2832     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2833     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2834   }
2835 
2836   // Shared implementation of AllocateRaw, AllocateRawCode and
2837   // AllocateRawFixedArray.
2838   MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
2839                                                Executability executable);
2840 
2841   // Available bytes for objects in this space.
2842   inline intptr_t Available();
2843 
Size()2844   virtual intptr_t Size() {
2845     return size_;
2846   }
2847 
SizeOfObjects()2848   virtual intptr_t SizeOfObjects() {
2849     return objects_size_;
2850   }
2851 
MaximumCommittedMemory()2852   intptr_t MaximumCommittedMemory() {
2853     return maximum_committed_;
2854   }
2855 
CommittedMemory()2856   intptr_t CommittedMemory() {
2857     return Size();
2858   }
2859 
2860   // Approximate amount of physical memory committed for this space.
2861   size_t CommittedPhysicalMemory();
2862 
PageCount()2863   int PageCount() {
2864     return page_count_;
2865   }
2866 
2867   // Finds an object for a given address, returns a Smi if it is not found.
2868   // The function iterates through all objects in this space, may be slow.
2869   Object* FindObject(Address a);
2870 
2871   // Finds a large object page containing the given address, returns NULL
2872   // if such a page doesn't exist.
2873   LargePage* FindPage(Address a);
2874 
2875   // Frees unmarked objects.
2876   void FreeUnmarkedObjects();
2877 
2878   // Checks whether a heap object is in this space; O(1).
2879   bool Contains(HeapObject* obj);
2880 
2881   // Checks whether the space is empty.
IsEmpty()2882   bool IsEmpty() { return first_page_ == NULL; }
2883 
first_page()2884   LargePage* first_page() { return first_page_; }
2885 
2886 #ifdef VERIFY_HEAP
2887   virtual void Verify();
2888 #endif
2889 
2890 #ifdef DEBUG
2891   virtual void Print();
2892   void ReportStatistics();
2893   void CollectCodeStatistics();
2894 #endif
2895   // Checks whether an address is in the object area in this space.  It
2896   // iterates all objects in the space. May be slow.
SlowContains(Address addr)2897   bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
2898 
2899  private:
2900   intptr_t max_capacity_;
2901   intptr_t maximum_committed_;
2902   // The head of the linked list of large object chunks.
2903   LargePage* first_page_;
2904   intptr_t size_;  // allocated bytes
2905   int page_count_;  // number of chunks
2906   intptr_t objects_size_;  // size of objects
2907   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2908   HashMap chunk_map_;
2909 
2910   friend class LargeObjectIterator;
2911 
2912  public:
2913   TRACK_MEMORY("LargeObjectSpace")
2914 };
2915 
2916 
2917 class LargeObjectIterator: public ObjectIterator {
2918  public:
2919   explicit LargeObjectIterator(LargeObjectSpace* space);
2920   LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
2921 
2922   HeapObject* Next();
2923 
2924   // implementation of ObjectIterator.
next_object()2925   virtual HeapObject* next_object() { return Next(); }
2926 
2927  private:
2928   LargePage* current_;
2929   HeapObjectCallback size_func_;
2930 };
2931 
2932 
2933 // Iterates over the chunks (pages and large object pages) that can contain
2934 // pointers to new space.
2935 class PointerChunkIterator BASE_EMBEDDED {
2936  public:
2937   inline explicit PointerChunkIterator(Heap* heap);
2938 
2939   // Return NULL when the iterator is done.
next()2940   MemoryChunk* next() {
2941     switch (state_) {
2942       case kOldPointerState: {
2943         if (old_pointer_iterator_.has_next()) {
2944           return old_pointer_iterator_.next();
2945         }
2946         state_ = kMapState;
2947         // Fall through.
2948       }
2949       case kMapState: {
2950         if (map_iterator_.has_next()) {
2951           return map_iterator_.next();
2952         }
2953         state_ = kLargeObjectState;
2954         // Fall through.
2955       }
2956       case kLargeObjectState: {
2957         HeapObject* heap_object;
2958         do {
2959           heap_object = lo_iterator_.Next();
2960           if (heap_object == NULL) {
2961             state_ = kFinishedState;
2962             return NULL;
2963           }
2964           // Fixed arrays are the only pointer-containing objects in large
2965           // object space.
2966         } while (!heap_object->IsFixedArray());
2967         MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
2968         return answer;
2969       }
2970       case kFinishedState:
2971         return NULL;
2972       default:
2973         break;
2974     }
2975     UNREACHABLE();
2976     return NULL;
2977   }
2978 
2979 
2980  private:
2981   enum State {
2982     kOldPointerState,
2983     kMapState,
2984     kLargeObjectState,
2985     kFinishedState
2986   };
2987   State state_;
2988   PageIterator old_pointer_iterator_;
2989   PageIterator map_iterator_;
2990   LargeObjectIterator lo_iterator_;
2991 };
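// Illustrative use (hypothetical call site):
//
//   PointerChunkIterator it(heap);
//   MemoryChunk* chunk;
//   while ((chunk = it.next()) != NULL) {
//     // ... visit pages of old pointer space, map space, and the
//     // pointer-containing large-object pages ...
//   }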
2992 
2993 
2994 #ifdef DEBUG
2995 struct CommentStatistic {
2996   const char* comment;
2997   int size;
2998   int count;
2999   void Clear() {
3000     comment = NULL;
3001     size = 0;
3002     count = 0;
3003   }
3004   // Must be small, since an iteration is used for lookup.
3005   static const int kMaxComments = 64;
3006 };
3007 #endif
3008 
3009 
3010 } }  // namespace v8::internal
3011 
3012 #endif  // V8_SPACES_H_
3013