1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include <list>
9 
10 #include "src/allocation.h"
11 #include "src/base/atomic-utils.h"
12 #include "src/base/atomicops.h"
13 #include "src/base/bits.h"
14 #include "src/base/hashmap.h"
15 #include "src/base/platform/mutex.h"
16 #include "src/flags.h"
17 #include "src/list.h"
18 #include "src/objects.h"
19 #include "src/utils.h"
20 
21 namespace v8 {
22 namespace internal {
23 
24 class AllocationInfo;
25 class AllocationObserver;
26 class CompactionSpace;
27 class CompactionSpaceCollection;
28 class FreeList;
29 class Isolate;
30 class LocalArrayBufferTracker;
31 class MemoryAllocator;
32 class MemoryChunk;
33 class Page;
34 class PagedSpace;
35 class SemiSpace;
36 class SkipList;
37 class SlotsBuffer;
38 class SlotSet;
39 class TypedSlotSet;
40 class Space;
41 
42 // -----------------------------------------------------------------------------
43 // Heap structures:
44 //
45 // A JS heap consists of a young generation, an old generation, and a large
46 // object space. The young generation is divided into two semispaces. A
47 // scavenger implements Cheney's copying algorithm. The old generation is
48 // separated into a map space and an old object space. The map space contains
49 // all (and only) map objects, the rest of old objects go into the old space.
50 // The old generation is collected by a mark-sweep-compact collector.
51 //
52 // The semispaces of the young generation are contiguous.  The old and map
53 // spaces consist of lists of pages. A page has a page header and an object
54 // area.
55 //
56 // There is a separate large object space for objects larger than
57 // Page::kMaxRegularHeapObjectSize, so that they do not have to move during
58 // collection. The large object space is paged. Pages in large object space
59 // may be larger than the page size.
60 //
61 // A store-buffer based write barrier is used to keep track of intergenerational
62 // references.  See heap/store-buffer.h.
63 //
64 // During scavenges and mark-sweep collections we sometimes (after a store
65 // buffer overflow) iterate intergenerational pointers without decoding heap
66 // object maps so if the page belongs to old space or large object space
67 // it is essential to guarantee that the page does not contain any
68 // garbage pointers to new space: every pointer aligned word which satisfies
69 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
70 // new space. Thus objects in old space and large object spaces should have a
71 // special layout (e.g. no bare integer fields). This requirement does not
72 // apply to map space which is iterated in a special fashion. However we still
73 // require pointer fields of dead maps to be cleaned.
74 //
75 // To enable lazy cleaning of old space pages we can mark chunks of the page
76 // as being garbage.  Garbage sections are marked with a special map.  These
77 // sections are skipped when scanning the page, even if we are otherwise
78 // scanning without regard for object boundaries.  Garbage sections are chained
79 // together to form a free list after a GC.  Garbage sections created outside
80 // of GCs by object truncation etc. may not be in the free list chain.  Very
81 // small free spaces are ignored, they need only be cleaned of bogus pointers
82 // into new space.
83 //
84 // Each page may have up to one special garbage section.  The start of this
85 // section is denoted by the top field in the space.  The end of the section
86 // is denoted by the limit field in the space.  This special garbage section
87 // is not marked with a free space map in the data.  The point of this section
88 // is to enable linear allocation without having to constantly update the byte
89 // array every time the top field is updated and a new object is created.  The
90 // special garbage section is not in the chain of garbage sections.
91 //
92 // Since the top and limit fields are in the space, not the page, only one page
93 // has a special garbage section, and if the top and limit are equal then there
94 // is no special garbage section.
95 
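// Example (illustrative sketch, not part of the original header): the space an
// object lives in can be recovered from the header of its memory chunk, e.g.
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
//   Space* owner = chunk->owner();            // may be null for chunks
//                                             // without a page header
//   AllocationSpace id = owner->identity();
//
// This uses only MemoryChunk::FromAddress(), MemoryChunk::owner() and
// Space::identity() declared further down in this file; |object| is a
// placeholder for any HeapObject*.
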
96 // Some assertion macros used in the debugging mode.
97 
98 #define DCHECK_PAGE_ALIGNED(address) \
99   DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
100 
101 #define DCHECK_OBJECT_ALIGNED(address) \
102   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
103 
104 #define DCHECK_OBJECT_SIZE(size) \
105   DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
106 
107 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
108   DCHECK((0 < size) && (size <= code_space->AreaSize()))
109 
110 #define DCHECK_PAGE_OFFSET(offset) \
111   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
112 
113 class MarkBit {
114  public:
115   typedef uint32_t CellType;
116 
117   inline MarkBit(CellType* cell, CellType mask) : cell_(cell), mask_(mask) {}
118 
119 #ifdef DEBUG
120   bool operator==(const MarkBit& other) {
121     return cell_ == other.cell_ && mask_ == other.mask_;
122   }
123 #endif
124 
125  private:
126   inline CellType* cell() { return cell_; }
127   inline CellType mask() { return mask_; }
128 
129   inline MarkBit Next() {
130     CellType new_mask = mask_ << 1;
131     if (new_mask == 0) {
132       return MarkBit(cell_ + 1, 1);
133     } else {
134       return MarkBit(cell_, new_mask);
135     }
136   }
137 
138   inline void Set() { *cell_ |= mask_; }
139   inline bool Get() { return (*cell_ & mask_) != 0; }
140   inline void Clear() { *cell_ &= ~mask_; }
141 
142   CellType* cell_;
143   CellType mask_;
144 
145   friend class Marking;
146 };
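
// Example (illustrative sketch, not part of the original header): a MarkBit is
// a (cell pointer, bit mask) pair into the marking bitmap below; each bit of a
// 32-bit cell covers one pointer-aligned word. Within the Marking friend class
// the accessors behave like this:
//
//   MarkBit::CellType cell = 0;
//   MarkBit bit(&cell, 1u << 31);  // last bit of this cell
//   bit.Set();                     // cell == 0x80000000u
//   MarkBit next = bit.Next();     // rolls over to MarkBit(&cell + 1, 1u)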
147 
148 
149 // Bitmap is a sequence of cells each containing fixed number of bits.
150 class Bitmap {
151  public:
152   static const uint32_t kBitsPerCell = 32;
153   static const uint32_t kBitsPerCellLog2 = 5;
154   static const uint32_t kBitIndexMask = kBitsPerCell - 1;
155   static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
156   static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
157 
158   static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
159 
160   static const size_t kSize =
161       (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
162 
163 
164   static int CellsForLength(int length) {
165     return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
166   }
167 
168   int CellsCount() { return CellsForLength(kLength); }
169 
170   static int SizeFor(int cells_count) {
171     return sizeof(MarkBit::CellType) * cells_count;
172   }
173 
174   INLINE(static uint32_t IndexToCell(uint32_t index)) {
175     return index >> kBitsPerCellLog2;
176   }
177 
178   V8_INLINE static uint32_t IndexInCell(uint32_t index) {
179     return index & kBitIndexMask;
180   }
181 
182   INLINE(static uint32_t CellToIndex(uint32_t index)) {
183     return index << kBitsPerCellLog2;
184   }
185 
186   INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
187     return (index + kBitIndexMask) & ~kBitIndexMask;
188   }
189 
190   INLINE(MarkBit::CellType* cells()) {
191     return reinterpret_cast<MarkBit::CellType*>(this);
192   }
193 
194   INLINE(Address address()) { return reinterpret_cast<Address>(this); }
195 
196   INLINE(static Bitmap* FromAddress(Address addr)) {
197     return reinterpret_cast<Bitmap*>(addr);
198   }
199 
200   inline MarkBit MarkBitFromIndex(uint32_t index) {
201     MarkBit::CellType mask = 1u << IndexInCell(index);
202     MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
203     return MarkBit(cell, mask);
204   }
205 
206   static inline void Clear(MemoryChunk* chunk);
207 
208   static inline void SetAllBits(MemoryChunk* chunk);
209 
210   static void PrintWord(uint32_t word, uint32_t himask = 0) {
211     for (uint32_t mask = 1; mask != 0; mask <<= 1) {
212       if ((mask & himask) != 0) PrintF("[");
213       PrintF((mask & word) ? "1" : "0");
214       if ((mask & himask) != 0) PrintF("]");
215     }
216   }
217 
218   class CellPrinter {
219    public:
220     CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
221 
222     void Print(uint32_t pos, uint32_t cell) {
223       if (cell == seq_type) {
224         seq_length++;
225         return;
226       }
227 
228       Flush();
229 
230       if (IsSeq(cell)) {
231         seq_start = pos;
232         seq_length = 0;
233         seq_type = cell;
234         return;
235       }
236 
237       PrintF("%d: ", pos);
238       PrintWord(cell);
239       PrintF("\n");
240     }
241 
242     void Flush() {
243       if (seq_length > 0) {
244         PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
245                seq_length * kBitsPerCell);
246         seq_length = 0;
247       }
248     }
249 
250     static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
251 
252    private:
253     uint32_t seq_start;
254     uint32_t seq_type;
255     uint32_t seq_length;
256   };
257 
258   void Print() {
259     CellPrinter printer;
260     for (int i = 0; i < CellsCount(); i++) {
261       printer.Print(i, cells()[i]);
262     }
263     printer.Flush();
264     PrintF("\n");
265   }
266 
267   bool IsClean() {
268     for (int i = 0; i < CellsCount(); i++) {
269       if (cells()[i] != 0) {
270         return false;
271       }
272     }
273     return true;
274   }
275 
276   // Clears all bits starting from {cell_base_index} up to and excluding
277   // {index}. Note that {cell_base_index} is required to be cell aligned.
278   void ClearRange(uint32_t cell_base_index, uint32_t index) {
279     DCHECK_EQ(IndexInCell(cell_base_index), 0u);
280     DCHECK_GE(index, cell_base_index);
281     uint32_t start_cell_index = IndexToCell(cell_base_index);
282     uint32_t end_cell_index = IndexToCell(index);
283     DCHECK_GE(end_cell_index, start_cell_index);
284     // Clear all cells till the cell containing the last index.
285     for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
286       cells()[i] = 0;
287     }
288     // Clear all bits in the last cell till the last bit before index.
289     uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
290     cells()[end_cell_index] &= clear_mask;
291   }
292 };
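
// Example (illustrative sketch, not part of the original header): with
// kBitsPerCell == 32, mark bit index 37 lives in cell 37 >> kBitsPerCellLog2
// == 1 at bit position 37 & kBitIndexMask == 5:
//
//   Bitmap* bitmap = Bitmap::FromAddress(bitmap_start);
//   MarkBit bit = bitmap->MarkBitFromIndex(37);
//   // bit references bitmap->cells()[1] with mask 1u << 5
//
// |bitmap_start| is a placeholder for the bitmap inside a MemoryChunk header
// (see MemoryChunk::markbits() below).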
293 
294 enum FreeListCategoryType {
295   kTiniest,
296   kTiny,
297   kSmall,
298   kMedium,
299   kLarge,
300   kHuge,
301 
302   kFirstCategory = kTiniest,
303   kLastCategory = kHuge,
304   kNumberOfCategories = kLastCategory + 1,
305   kInvalidCategory
306 };
307 
308 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
309 
310 // A free list category maintains a linked list of free memory blocks.
311 class FreeListCategory {
312  public:
313   static const int kSize = kIntSize +      // FreeListCategoryType type_
314                            kIntSize +      // int available_
315                            kPointerSize +  // FreeSpace* top_
316                            kPointerSize +  // FreeListCategory* prev_
317                            kPointerSize;   // FreeListCategory* next_
318 
319   FreeListCategory()
320       : type_(kInvalidCategory),
321         available_(0),
322         top_(nullptr),
323         prev_(nullptr),
324         next_(nullptr) {}
325 
326   void Initialize(FreeListCategoryType type) {
327     type_ = type;
328     available_ = 0;
329     top_ = nullptr;
330     prev_ = nullptr;
331     next_ = nullptr;
332   }
333 
334   void Invalidate();
335 
336   void Reset();
337 
338   void ResetStats() { Reset(); }
339 
340   void RepairFreeList(Heap* heap);
341 
342   // Relinks the category into the currently owning free list. Requires that the
343   // category is currently unlinked.
344   void Relink();
345 
346   bool Free(FreeSpace* node, int size_in_bytes, FreeMode mode);
347 
348   // Picks a node from the list and stores its size in |node_size|. Returns
349   // nullptr if the category is empty.
350   FreeSpace* PickNodeFromList(int* node_size);
351 
352   // Performs a single try to pick a node of at least |minimum_size| from the
353   // category. Stores the actual size in |node_size|. Returns nullptr if no
354   // node is found.
355   FreeSpace* TryPickNodeFromList(int minimum_size, int* node_size);
356 
357   // Picks a node of at least |minimum_size| from the category. Stores the
358   // actual size in |node_size|. Returns nullptr if no node is found.
359   FreeSpace* SearchForNodeInList(int minimum_size, int* node_size);
360 
361   inline FreeList* owner();
362   inline bool is_linked();
363   bool is_empty() { return top() == nullptr; }
364   int available() const { return available_; }
365 
366 #ifdef DEBUG
367   intptr_t SumFreeList();
368   int FreeListLength();
369 #endif
370 
371  private:
372   // For debug builds we accurately compute free list lengths up until
373   // {kVeryLongFreeList} by manually walking the list.
374   static const int kVeryLongFreeList = 500;
375 
376   inline Page* page();
377 
378   FreeSpace* top() { return top_; }
379   void set_top(FreeSpace* top) { top_ = top; }
380   FreeListCategory* prev() { return prev_; }
381   void set_prev(FreeListCategory* prev) { prev_ = prev; }
382   FreeListCategory* next() { return next_; }
383   void set_next(FreeListCategory* next) { next_ = next; }
384 
385   // |type_|: The type of this free list category.
386   FreeListCategoryType type_;
387 
388   // |available_|: Total available bytes in all blocks of this free list
389   // category.
390   int available_;
391 
392   // |top_|: Points to the top FreeSpace* in the free list category.
393   FreeSpace* top_;
394 
395   FreeListCategory* prev_;
396   FreeListCategory* next_;
397 
398   friend class FreeList;
399   friend class PagedSpace;
400 };
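
// Example (illustrative sketch, not part of the original header): allocating
// from a single category first tries the node at the top and then walks the
// list, per the declarations above:
//
//   int node_size = 0;
//   FreeSpace* node = category->TryPickNodeFromList(minimum_size, &node_size);
//   if (node == nullptr) {
//     node = category->SearchForNodeInList(minimum_size, &node_size);
//   }
//
// |category| and |minimum_size| are placeholders; the actual search order
// across categories is implemented by FreeList, which is not shown here.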
401 
402 // MemoryChunk represents a memory region owned by a specific space.
403 // It is divided into the header and the body. Chunk start is always
404 // 1MB aligned. Start of the body is aligned so it can accommodate
405 // any heap object.
406 class MemoryChunk {
407  public:
408   enum MemoryChunkFlags {
409     IS_EXECUTABLE,
410     POINTERS_TO_HERE_ARE_INTERESTING,
411     POINTERS_FROM_HERE_ARE_INTERESTING,
412     IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
413     IN_TO_SPACE,    // All pages in new space have one of these two set.
414     NEW_SPACE_BELOW_AGE_MARK,
415     EVACUATION_CANDIDATE,
416     NEVER_EVACUATE,  // May contain immortal immutables.
417 
418     // Large objects can have a progress bar in their page header. These objects
419     // are scanned in increments and will be kept black while being scanned.
420     // Even if the mutator writes to them they will be kept black and a white
421     // to grey transition is performed in the value.
422     HAS_PROGRESS_BAR,
423 
424     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
425     // from new to old space during evacuation.
426     PAGE_NEW_OLD_PROMOTION,
427 
428     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
429     // within the new space during evacuation.
430     PAGE_NEW_NEW_PROMOTION,
431 
432     // A black page has all mark bits set to 1 (black). A black page currently
433     // cannot be iterated because it is not swept. Moreover live bytes are also
434     // not updated.
435     BLACK_PAGE,
436 
437     // This flag is intended to be used for testing. Works only when both
438     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
439     // are set. It forces the page to become an evacuation candidate at next
440     // candidates selection cycle.
441     FORCE_EVACUATION_CANDIDATE_FOR_TESTING,
442 
443     // This flag is intended to be used for testing.
444     NEVER_ALLOCATE_ON_PAGE,
445 
446     // The memory chunk is already logically freed, however the actual freeing
447     // still has to be performed.
448     PRE_FREED,
449 
450     // |POOLED|: When actually freeing this chunk, only uncommit and do not
451     // give up the reservation as we still reuse the chunk at some point.
452     POOLED,
453 
454     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
455     //   has been aborted and needs special handling by the sweeper.
456     COMPACTION_WAS_ABORTED,
457 
458     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
459     // on pages is sometimes aborted. The flag is used to avoid repeatedly
460     // triggering on the same page.
461     COMPACTION_WAS_ABORTED_FOR_TESTING,
462 
463     // |ANCHOR|: Flag is set if page is an anchor.
464     ANCHOR,
465 
466     // Last flag, keep at bottom.
467     NUM_MEMORY_CHUNK_FLAGS
468   };
469 
470   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
471   //   not be performed on that page. Sweeper threads that are done with their
472   //   work will set this value and not touch the page anymore.
473   // |kSweepingPending|: This page is ready for parallel sweeping.
474   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
475   enum ConcurrentSweepingState {
476     kSweepingDone,
477     kSweepingPending,
478     kSweepingInProgress,
479   };
480 
481   // Every n write barrier invocations we go to runtime even though
482   // we could have handled it in generated code.  This lets us check
483   // whether we have hit the limit and should do some more marking.
484   static const int kWriteBarrierCounterGranularity = 500;
485 
486   static const int kPointersToHereAreInterestingMask =
487       1 << POINTERS_TO_HERE_ARE_INTERESTING;
488 
489   static const int kPointersFromHereAreInterestingMask =
490       1 << POINTERS_FROM_HERE_ARE_INTERESTING;
491 
492   static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
493 
494   static const int kSkipEvacuationSlotsRecordingMask =
495       (1 << EVACUATION_CANDIDATE) | (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
496 
497   static const intptr_t kAlignment =
498       (static_cast<uintptr_t>(1) << kPageSizeBits);
499 
500   static const intptr_t kAlignmentMask = kAlignment - 1;
501 
502   static const intptr_t kSizeOffset = 0;
503 
504   static const intptr_t kFlagsOffset = kSizeOffset + kPointerSize;
505 
506   static const intptr_t kLiveBytesOffset =
507       kSizeOffset + kPointerSize  // size_t size
508       + kIntptrSize               // intptr_t flags_
509       + kPointerSize              // Address area_start_
510       + kPointerSize              // Address area_end_
511       + 2 * kPointerSize          // base::VirtualMemory reservation_
512       + kPointerSize              // Address owner_
513       + kPointerSize              // Heap* heap_
514       + kIntSize;                 // int progress_bar_
515 
516   static const size_t kOldToNewSlotsOffset =
517       kLiveBytesOffset + kIntSize;  // int live_byte_count_
518 
519   static const size_t kWriteBarrierCounterOffset =
520       kOldToNewSlotsOffset + kPointerSize  // SlotSet* old_to_new_slots_;
521       + kPointerSize                       // SlotSet* old_to_old_slots_;
522       + kPointerSize   // TypedSlotSet* typed_old_to_new_slots_;
523       + kPointerSize   // TypedSlotSet* typed_old_to_old_slots_;
524       + kPointerSize;  // SkipList* skip_list_;
525 
526   static const size_t kMinHeaderSize =
527       kWriteBarrierCounterOffset +
528       kIntptrSize         // intptr_t write_barrier_counter_
529       + kPointerSize      // AtomicValue high_water_mark_
530       + kPointerSize      // base::Mutex* mutex_
531       + kPointerSize      // base::AtomicWord concurrent_sweeping_
532       + 2 * kPointerSize  // AtomicNumber free-list statistics
533       + kPointerSize      // AtomicValue next_chunk_
534       + kPointerSize      // AtomicValue prev_chunk_
535       // FreeListCategory categories_[kNumberOfCategories]
536       + FreeListCategory::kSize * kNumberOfCategories +
537       kPointerSize;  // LocalArrayBufferTracker* local_tracker_;
538 
539   // We add some more space to the computed header size to account for missing
540   // alignment requirements in our computation.
541   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
542   static const size_t kHeaderSize = kMinHeaderSize;
543 
544   static const int kBodyOffset =
545       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
546 
547   // The start offset of the object area in a page. Aligned to both map and
548   // code alignment to be suitable for both.  Also aligned to 32 words because
549   // the marking bitmap is arranged in 32 bit chunks.
550   static const int kObjectStartAlignment = 32 * kPointerSize;
551   static const int kObjectStartOffset =
552       kBodyOffset - 1 +
553       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
554 
555   // Page size in bytes.  This must be a multiple of the OS page size.
556   static const int kPageSize = 1 << kPageSizeBits;
557   static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
558 
559   static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
560 
561   static inline void IncrementLiveBytesFromMutator(HeapObject* object, int by);
562   static inline void IncrementLiveBytesFromGC(HeapObject* object, int by);
563 
564   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
565   static MemoryChunk* FromAddress(Address a) {
566     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
567   }
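
  // Example (illustrative sketch, not part of the original header): because
  // chunks are kAlignment-aligned (1MB, assuming kPageSizeBits == 20 as
  // described above), masking the low bits of any interior address yields the
  // chunk header:
  //
  //   // For a chunk starting at 0x42500000:
  //   Address addr = reinterpret_cast<Address>(0x425a1230);
  //   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);  // 0x42500000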
568 
569   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
570 
571   static inline void UpdateHighWaterMark(Address mark) {
572     if (mark == nullptr) return;
573     // Need to subtract one from the mark because when a chunk is full the
574     // top points to the next address after the chunk, which effectively belongs
575     // to another chunk. See the comment to Page::FromTopOrLimit.
576     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
577     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
578     intptr_t old_mark = 0;
579     do {
580       old_mark = chunk->high_water_mark_.Value();
581     } while ((new_mark > old_mark) &&
582              !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
583   }
584 
585   static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
586 
587   Address address() { return reinterpret_cast<Address>(this); }
588 
589   base::Mutex* mutex() { return mutex_; }
590 
591   bool Contains(Address addr) {
592     return addr >= area_start() && addr < area_end();
593   }
594 
595   // Checks whether |addr| can be a limit of addresses in this page. It's a
596   // limit if it's in the page, or if it's just after the last byte of the page.
597   bool ContainsLimit(Address addr) {
598     return addr >= area_start() && addr <= area_end();
599   }
600 
601   base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
602     return concurrent_sweeping_;
603   }
604 
605   // Manage live byte count, i.e., count of bytes in black objects.
606   inline void ResetLiveBytes();
607   inline void IncrementLiveBytes(int by);
608 
609   int LiveBytes() {
610     DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
611     DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
612     return live_byte_count_;
613   }
614 
615   void SetLiveBytes(int live_bytes) {
616     if (IsFlagSet(BLACK_PAGE)) return;
617     DCHECK_GE(live_bytes, 0);
618     DCHECK_LE(static_cast<size_t>(live_bytes), size_);
619     live_byte_count_ = live_bytes;
620   }
621 
622   int write_barrier_counter() {
623     return static_cast<int>(write_barrier_counter_);
624   }
625 
626   void set_write_barrier_counter(int counter) {
627     write_barrier_counter_ = counter;
628   }
629 
630   size_t size() const { return size_; }
631 
632   inline Heap* heap() const { return heap_; }
633 
634   inline SkipList* skip_list() { return skip_list_; }
635 
636   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
637 
638   inline SlotSet* old_to_new_slots() { return old_to_new_slots_; }
639   inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
640   inline TypedSlotSet* typed_old_to_new_slots() {
641     return typed_old_to_new_slots_;
642   }
643   inline TypedSlotSet* typed_old_to_old_slots() {
644     return typed_old_to_old_slots_;
645   }
646   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
647 
648   void AllocateOldToNewSlots();
649   void ReleaseOldToNewSlots();
650   void AllocateOldToOldSlots();
651   void ReleaseOldToOldSlots();
652   void AllocateTypedOldToNewSlots();
653   void ReleaseTypedOldToNewSlots();
654   void AllocateTypedOldToOldSlots();
655   void ReleaseTypedOldToOldSlots();
656   void AllocateLocalTracker();
657   void ReleaseLocalTracker();
658 
659   Address area_start() { return area_start_; }
660   Address area_end() { return area_end_; }
661   int area_size() { return static_cast<int>(area_end() - area_start()); }
662 
663   bool CommitArea(size_t requested);
664 
665   // Approximate amount of physical memory committed for this chunk.
666   size_t CommittedPhysicalMemory() { return high_water_mark_.Value(); }
667 
668   Address HighWaterMark() { return address() + high_water_mark_.Value(); }
669 
670   int progress_bar() {
671     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
672     return progress_bar_;
673   }
674 
675   void set_progress_bar(int progress_bar) {
676     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
677     progress_bar_ = progress_bar;
678   }
679 
680   void ResetProgressBar() {
681     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
682       set_progress_bar(0);
683       ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
684     }
685   }
686 
687   inline Bitmap* markbits() {
688     return Bitmap::FromAddress(address() + kHeaderSize);
689   }
690 
691   inline uint32_t AddressToMarkbitIndex(Address addr) {
692     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
693   }
694 
695   inline Address MarkbitIndexToAddress(uint32_t index) {
696     return this->address() + (index << kPointerSizeLog2);
697   }
698 
699   void PrintMarkbits() { markbits()->Print(); }
700 
701   void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
702 
703   void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
704 
705   bool IsFlagSet(int flag) {
706     return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
707   }
708 
709   // Set or clear multiple flags at a time. The flags in the mask are set to
710   // the value in "flags", the rest retain the current value in |flags_|.
711   void SetFlags(intptr_t flags, intptr_t mask) {
712     flags_ = (flags_ & ~mask) | (flags & mask);
713   }
714 
715   // Return all current flags.
716   intptr_t GetFlags() { return flags_; }
717 
718   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
719 
720   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
721 
722   bool IsEvacuationCandidate() {
723     DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
724     return IsFlagSet(EVACUATION_CANDIDATE);
725   }
726 
727   bool CanAllocate() {
728     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
729   }
730 
731   bool ShouldSkipEvacuationSlotRecording() {
732     return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
733            !IsFlagSet(COMPACTION_WAS_ABORTED);
734   }
735 
736   Executability executable() {
737     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
738   }
739 
740   bool InNewSpace() {
741     return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
742   }
743 
744   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
745 
746   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
747 
748   MemoryChunk* next_chunk() { return next_chunk_.Value(); }
749 
750   MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
751 
752   void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
753 
754   void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
755 
756   Space* owner() const {
757     if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
758         kPageHeaderTag) {
759       return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
760                                       kPageHeaderTag);
761     } else {
762       return nullptr;
763     }
764   }
765 
766   void set_owner(Space* space) {
767     DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
768     owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
769     DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
770            kPageHeaderTag);
771   }
772 
773   bool HasPageHeader() { return owner() != nullptr; }
774 
775   void InsertAfter(MemoryChunk* other);
776   void Unlink();
777 
778  protected:
779   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
780                                  Address area_start, Address area_end,
781                                  Executability executable, Space* owner,
782                                  base::VirtualMemory* reservation);
783 
784   // Should be called when memory chunk is about to be freed.
785   void ReleaseAllocatedMemory();
786 
787   base::VirtualMemory* reserved_memory() { return &reservation_; }
788 
789   size_t size_;
790   intptr_t flags_;
791 
792   // Start and end of allocatable memory on this chunk.
793   Address area_start_;
794   Address area_end_;
795 
796   // If the chunk needs to remember its memory reservation, it is stored here.
797   base::VirtualMemory reservation_;
798 
799   // The identity of the owning space.  This is tagged as a failure pointer, but
800   // no failure can be in an object, so this can be distinguished from any entry
801   // in a fixed array.
802   Address owner_;
803 
804   Heap* heap_;
805 
806   // Used by the incremental marker to keep track of the scanning progress in
807   // large objects that have a progress bar and are scanned in increments.
808   int progress_bar_;
809 
810   // Count of bytes marked black on page.
811   int live_byte_count_;
812 
813   // A single slot set for small pages (of size kPageSize) or an array of slot
814   // sets for large pages. In the latter case the number of entries in the array
815   // is ceil(size() / kPageSize).
816   SlotSet* old_to_new_slots_;
817   SlotSet* old_to_old_slots_;
818   TypedSlotSet* typed_old_to_new_slots_;
819   TypedSlotSet* typed_old_to_old_slots_;
820 
821   SkipList* skip_list_;
822 
823   intptr_t write_barrier_counter_;
824 
825   // Assuming the initial allocation on a page is sequential,
826   // counts the highest number of bytes ever allocated on the page.
827   base::AtomicValue<intptr_t> high_water_mark_;
828 
829   base::Mutex* mutex_;
830 
831   base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
832 
833   // PagedSpace free-list statistics.
834   base::AtomicNumber<intptr_t> available_in_free_list_;
835   base::AtomicNumber<intptr_t> wasted_memory_;
836 
837   // next_chunk_ holds a pointer of type MemoryChunk
838   base::AtomicValue<MemoryChunk*> next_chunk_;
839   // prev_chunk_ holds a pointer of type MemoryChunk
840   base::AtomicValue<MemoryChunk*> prev_chunk_;
841 
842   FreeListCategory categories_[kNumberOfCategories];
843 
844   LocalArrayBufferTracker* local_tracker_;
845 
846  private:
847   void InitializeReservedMemory() { reservation_.Reset(); }
848 
849   friend class MemoryAllocator;
850   friend class MemoryChunkValidator;
851 };
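
// Example (illustrative sketch, not part of the original header): chunk flags
// are bit indices into |flags_| manipulated via SetFlag/ClearFlag/IsFlagSet,
// and the sweeping state is read through an atomic value:
//
//   MemoryChunk* chunk = MemoryChunk::FromAddress(addr);
//   if (chunk->IsFlagSet(MemoryChunk::NEVER_EVACUATE)) return;
//   bool done = chunk->concurrent_sweeping_state().Value() ==
//               MemoryChunk::kSweepingDone;
//
// |addr| is a placeholder for any address inside the chunk.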
852 
853 // -----------------------------------------------------------------------------
854 // A page is a memory chunk of size 1MB. Large object pages may be larger.
855 //
856 // The only way to get a page pointer is by calling factory methods:
857 //   Page* p = Page::FromAddress(addr); or
858 //   Page* p = Page::FromTopOrLimit(top);
859 class Page : public MemoryChunk {
860  public:
861   static const intptr_t kCopyAllFlags = ~0;
862 
863   // Page flags copied from from-space to to-space when flipping semispaces.
864   static const intptr_t kCopyOnFlipFlagsMask =
865       (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
866       (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
867 
868   // Maximum object size that gets allocated into regular pages. Objects larger
869   // than that size are allocated in large object space and are never moved in
870   // memory. This also applies to new space allocation, since objects are never
871   // migrated from new space to large object space. Takes double alignment into
872   // account.
873   // TODO(hpayer): This limit should be way smaller but we currently have
874   // short living objects >256K.
875   static const int kMaxRegularHeapObjectSize = 600 * KB;
876 
877   static inline Page* ConvertNewToOld(Page* old_page, PagedSpace* new_owner);
878 
879   // Returns the page containing a given address. The address ranges
880   // from [page_addr .. page_addr + kPageSize[. This only works if the object
881   // is in fact in a page.
882   static Page* FromAddress(Address addr) {
883     return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
884   }
885 
886   // Returns the page containing the address provided. The address can
887   // potentially point right after the page. To be also safe for tagged values
888   // we subtract a hole word. The valid address ranges from
889   // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
890   static Page* FromAllocationAreaAddress(Address address) {
891     return Page::FromAddress(address - kPointerSize);
892   }
893 
894   // Checks if address1 and address2 are on the same new space page.
895   static bool OnSamePage(Address address1, Address address2) {
896     return Page::FromAddress(address1) == Page::FromAddress(address2);
897   }
898 
899   // Checks whether an address is page aligned.
900   static bool IsAlignedToPageSize(Address addr) {
901     return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
902   }
903 
904   static bool IsAtObjectStart(Address addr) {
905     return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
906            kObjectStartOffset;
907   }
908 
909   inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
910 
911   // Create a Page object that is only used as anchor for the doubly-linked
912   // list of real pages.
913   explicit Page(Space* owner) { InitializeAsAnchor(owner); }
914 
915   inline void MarkNeverAllocateForTesting();
916   inline void MarkEvacuationCandidate();
917   inline void ClearEvacuationCandidate();
918 
919   Page* next_page() { return static_cast<Page*>(next_chunk()); }
920   Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
921   void set_next_page(Page* page) { set_next_chunk(page); }
922   void set_prev_page(Page* page) { set_prev_chunk(page); }
923 
924   template <typename Callback>
925   inline void ForAllFreeListCategories(Callback callback) {
926     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
927       callback(&categories_[i]);
928     }
929   }
930 
931   // Returns the offset of a given address to this page.
932   inline int Offset(Address a) {
933     int offset = static_cast<int>(a - address());
934     return offset;
935   }
936 
937   // Returns the address for a given offset in this page.
938   Address OffsetToAddress(int offset) {
939     DCHECK_PAGE_OFFSET(offset);
940     return address() + offset;
941   }
942 
943   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
944   // progress. In particular, when we know that right before this call a
945   // sweeper thread was sweeping this page.
946   void WaitUntilSweepingCompleted() {
947     mutex_->Lock();
948     mutex_->Unlock();
949     DCHECK(SweepingDone());
950   }
951 
952   bool SweepingDone() {
953     return concurrent_sweeping_state().Value() == kSweepingDone;
954   }
955 
956   void ResetFreeListStatistics();
957 
958   int LiveBytesFromFreeList() {
959     return static_cast<int>(area_size() - wasted_memory() -
960                             available_in_free_list());
961   }
962 
963   FreeListCategory* free_list_category(FreeListCategoryType type) {
964     return &categories_[type];
965   }
966 
967   bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
968 
969   intptr_t wasted_memory() { return wasted_memory_.Value(); }
970   void add_wasted_memory(intptr_t waste) { wasted_memory_.Increment(waste); }
971   intptr_t available_in_free_list() { return available_in_free_list_.Value(); }
972   void add_available_in_free_list(intptr_t available) {
973     available_in_free_list_.Increment(available);
974   }
975 
976 #ifdef DEBUG
977   void Print();
978 #endif  // DEBUG
979 
980  private:
981   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
982 
983   template <InitializationMode mode = kFreeMemory>
984   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
985                                  Executability executable, PagedSpace* owner);
986   static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
987                                  Executability executable, SemiSpace* owner);
988 
989   inline void InitializeFreeListCategories();
990 
991   void InitializeAsAnchor(Space* owner);
992 
993   friend class MemoryAllocator;
994 };
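
// Example (illustrative sketch, not part of the original header): mapping
// between addresses and pages, as used by allocation code:
//
//   Page* p = Page::FromAllocationAreaAddress(top);  // safe even when |top|
//                                                    // points one past the
//                                                    // last byte of the page
//   int offset = p->Offset(addr);
//   DCHECK_EQ(addr, p->OffsetToAddress(offset));
//
// |top| and |addr| are placeholders for an allocation top pointer and an
// address within the page's object area.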
995 
996 class LargePage : public MemoryChunk {
997  public:
998   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
999 
1000   inline LargePage* next_page() {
1001     return static_cast<LargePage*>(next_chunk());
1002   }
1003 
1004   inline void set_next_page(LargePage* page) { set_next_chunk(page); }
1005 
1006   // A limit to guarantee that we do not overflow typed slot offset in
1007   // the old to old remembered set.
1008   // Note that this limit is higher than what the assembler already imposes on
1009   // x64 and ia32 architectures.
1010   static const int kMaxCodePageSize = 512 * MB;
1011 
1012  private:
1013   static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
1014                                       Executability executable, Space* owner);
1015 
1016   friend class MemoryAllocator;
1017 };
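
// Example (illustrative sketch, not part of the original header): a LargePage
// holds a single object placed at the start of its object area:
//
//   HeapObject* object = large_page->GetObject();
//   DCHECK_EQ(object->address(), large_page->area_start());
//
// |large_page| is a placeholder for a page obtained from the large object
// space.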
1018 
1019 
1020 // ----------------------------------------------------------------------------
1021 // Space is the abstract superclass for all allocation spaces.
1022 class Space : public Malloced {
1023  public:
1024   Space(Heap* heap, AllocationSpace id, Executability executable)
1025       : allocation_observers_(new List<AllocationObserver*>()),
1026         allocation_observers_paused_(false),
1027         heap_(heap),
1028         id_(id),
1029         executable_(executable),
1030         committed_(0),
1031         max_committed_(0) {}
1032 
1033   virtual ~Space() {}
1034 
1035   Heap* heap() const { return heap_; }
1036 
1037   // Does the space need executable memory?
1038   Executability executable() { return executable_; }
1039 
1040   // Identity used in error reporting.
1041   AllocationSpace identity() { return id_; }
1042 
1043   virtual void AddAllocationObserver(AllocationObserver* observer) {
1044     allocation_observers_->Add(observer);
1045   }
1046 
1047   virtual void RemoveAllocationObserver(AllocationObserver* observer) {
1048     bool removed = allocation_observers_->RemoveElement(observer);
1049     USE(removed);
1050     DCHECK(removed);
1051   }
1052 
1053   virtual void PauseAllocationObservers() {
1054     allocation_observers_paused_ = true;
1055   }
1056 
1057   virtual void ResumeAllocationObservers() {
1058     allocation_observers_paused_ = false;
1059   }
1060 
1061   void AllocationStep(Address soon_object, int size);
1062 
1063   // Return the total amount of committed memory for this space, i.e., allocatable
1064   // memory and page headers.
1065   virtual intptr_t CommittedMemory() { return committed_; }
1066 
1067   virtual intptr_t MaximumCommittedMemory() { return max_committed_; }
1068 
1069   // Returns allocated size.
1070   virtual intptr_t Size() = 0;
1071 
1072   // Returns size of objects. Can differ from the allocated size
1073   // (e.g. see LargeObjectSpace).
1074   virtual intptr_t SizeOfObjects() { return Size(); }
1075 
1076   // Approximate amount of physical memory committed for this space.
1077   virtual size_t CommittedPhysicalMemory() = 0;
1078 
1079   // Return the available bytes without growing.
1080   virtual intptr_t Available() = 0;
1081 
1082   virtual int RoundSizeDownToObjectAlignment(int size) {
1083     if (id_ == CODE_SPACE) {
1084       return RoundDown(size, kCodeAlignment);
1085     } else {
1086       return RoundDown(size, kPointerSize);
1087     }
1088   }
1089 
1090   void AccountCommitted(intptr_t bytes) {
1091     DCHECK_GE(bytes, 0);
1092     committed_ += bytes;
1093     if (committed_ > max_committed_) {
1094       max_committed_ = committed_;
1095     }
1096   }
1097 
1098   void AccountUncommitted(intptr_t bytes) {
1099     DCHECK_GE(bytes, 0);
1100     committed_ -= bytes;
1101     DCHECK_GE(committed_, 0);
1102   }
1103 
1104 #ifdef DEBUG
1105   virtual void Print() = 0;
1106 #endif
1107 
1108  protected:
1109   v8::base::SmartPointer<List<AllocationObserver*>> allocation_observers_;
1110   bool allocation_observers_paused_;
1111 
1112  private:
1113   Heap* heap_;
1114   AllocationSpace id_;
1115   Executability executable_;
1116 
1117   // Keeps track of committed memory in a space.
1118   intptr_t committed_;
1119   intptr_t max_committed_;
1120 };
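
// Example (illustrative sketch, not part of the original header): a concrete
// space tracks committed memory through the helpers above whenever it commits
// or releases a block, e.g. for one page:
//
//   space->AccountCommitted(static_cast<intptr_t>(Page::kPageSize));
//   // ... later, when the page is uncommitted:
//   space->AccountUncommitted(static_cast<intptr_t>(Page::kPageSize));
//
// CommittedMemory() then reports the running total and
// MaximumCommittedMemory() its high-water mark; |space| is a placeholder.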
1121 
1122 
1123 class MemoryChunkValidator {
1124   // Computed offsets should match the compiler generated ones.
1125   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
1126   STATIC_ASSERT(MemoryChunk::kLiveBytesOffset ==
1127                 offsetof(MemoryChunk, live_byte_count_));
1128   STATIC_ASSERT(MemoryChunk::kOldToNewSlotsOffset ==
1129                 offsetof(MemoryChunk, old_to_new_slots_));
1130   STATIC_ASSERT(MemoryChunk::kWriteBarrierCounterOffset ==
1131                 offsetof(MemoryChunk, write_barrier_counter_));
1132 
1133   // Validate our estimates on the header size.
1134   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1135   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1136   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1137 };
1138 
1139 
1140 // ----------------------------------------------------------------------------
1141 // All heap objects containing executable code (code objects) must be allocated
1142 // from a 2 GB range of memory, so that they can call each other using 32-bit
1143 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
1144 // displacements cover the entire 4GB virtual address space.  On 64-bit
1145 // platforms, we support this using the CodeRange object, which reserves and
1146 // manages a range of virtual memory.
1147 class CodeRange {
1148  public:
1149   explicit CodeRange(Isolate* isolate);
1150   ~CodeRange() { TearDown(); }
1151 
1152   // Reserves a range of virtual memory, but does not commit any of it.
1153   // Can only be called once, at heap initialization time.
1154   // Returns false on failure.
1155   bool SetUp(size_t requested_size);
1156 
1157   bool valid() { return code_range_ != NULL; }
1158   Address start() {
1159     DCHECK(valid());
1160     return static_cast<Address>(code_range_->address());
1161   }
1162   size_t size() {
1163     DCHECK(valid());
1164     return code_range_->size();
1165   }
1166   bool contains(Address address) {
1167     if (!valid()) return false;
1168     Address start = static_cast<Address>(code_range_->address());
1169     return start <= address && address < start + code_range_->size();
1170   }
1171 
1172   // Allocates a chunk of memory from the large-object portion of
1173   // the code range.  On platforms with no separate code range, should
1174   // not be called.
1175   MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
1176                                             const size_t commit_size,
1177                                             size_t* allocated);
1178   bool CommitRawMemory(Address start, size_t length);
1179   bool UncommitRawMemory(Address start, size_t length);
1180   void FreeRawMemory(Address buf, size_t length);
1181 
1182  private:
1183   class FreeBlock {
1184    public:
1185     FreeBlock() : start(0), size(0) {}
1186     FreeBlock(Address start_arg, size_t size_arg)
1187         : start(start_arg), size(size_arg) {
1188       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1189       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1190     }
1191     FreeBlock(void* start_arg, size_t size_arg)
1192         : start(static_cast<Address>(start_arg)), size(size_arg) {
1193       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1194       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1195     }
1196 
1197     Address start;
1198     size_t size;
1199   };
1200 
1201   // Frees the range of virtual memory, and frees the data structures used to
1202   // manage it.
1203   void TearDown();
1204 
1205   // Finds a block on the allocation list that contains at least the
1206   // requested amount of memory.  If none is found, sorts and merges
1207   // the existing free memory blocks, and searches again.
1208   // If none can be found, returns false.
1209   bool GetNextAllocationBlock(size_t requested);
1210   // Compares the start addresses of two free blocks.
1211   static int CompareFreeBlockAddress(const FreeBlock* left,
1212                                      const FreeBlock* right);
1213   bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1214   void ReleaseBlock(const FreeBlock* block);
1215 
1216   Isolate* isolate_;
1217 
1218   // The reserved range of virtual memory that all code objects are put in.
1219   base::VirtualMemory* code_range_;
1220 
1221   // The global mutex guards free_list_ and allocation_list_ as GC threads may
1222   // access both lists concurrently with the main thread.
1223   base::Mutex code_range_mutex_;
1224 
1225   // Freed blocks of memory are added to the free list.  When the allocation
1226   // list is exhausted, the free list is sorted and merged to make the new
1227   // allocation list.
1228   List<FreeBlock> free_list_;
1229 
1230   // Memory is allocated from the free blocks on the allocation list.
1231   // The block at current_allocation_block_index_ is the current block.
1232   List<FreeBlock> allocation_list_;
1233   int current_allocation_block_index_;
1234 
1235   DISALLOW_COPY_AND_ASSIGN(CodeRange);
1236 };
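
// Example (illustrative sketch, not part of the original header): on 64-bit
// targets a CodeRange is reserved once during heap setup and executable chunks
// are carved out of it:
//
//   CodeRange code_range(isolate);
//   if (code_range.SetUp(requested_size)) {
//     size_t allocated = 0;
//     Address base = code_range.AllocateRawMemory(reserve_size, commit_size,
//                                                 &allocated);
//     // ... use [base, base + allocated) for executable memory ...
//     code_range.FreeRawMemory(base, allocated);
//   }
//
// |isolate|, |requested_size|, |reserve_size| and |commit_size| are
// placeholders for values supplied by the heap configuration.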
1237 
1238 
1239 class SkipList {
1240  public:
1241   SkipList() { Clear(); }
1242 
1243   void Clear() {
1244     for (int idx = 0; idx < kSize; idx++) {
1245       starts_[idx] = reinterpret_cast<Address>(-1);
1246     }
1247   }
1248 
1249   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1250 
1251   void AddObject(Address addr, int size) {
1252     int start_region = RegionNumber(addr);
1253     int end_region = RegionNumber(addr + size - kPointerSize);
1254     for (int idx = start_region; idx <= end_region; idx++) {
1255       if (starts_[idx] > addr) {
1256         starts_[idx] = addr;
1257       } else {
1258         // In the first region, there may already be an object closer to the
1259         // start of the region. Do not change the start in that case. If this
1260         // is not the first region, you probably added overlapping objects.
1261         DCHECK_EQ(start_region, idx);
1262       }
1263     }
1264   }
1265 
1266   static inline int RegionNumber(Address addr) {
1267     return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1268   }
1269 
1270   static void Update(Address addr, int size) {
1271     Page* page = Page::FromAddress(addr);
1272     SkipList* list = page->skip_list();
1273     if (list == NULL) {
1274       list = new SkipList();
1275       page->set_skip_list(list);
1276     }
1277 
1278     list->AddObject(addr, size);
1279   }
1280 
1281  private:
1282   static const int kRegionSizeLog2 = 13;
1283   static const int kRegionSize = 1 << kRegionSizeLog2;
1284   static const int kSize = Page::kPageSize / kRegionSize;
1285 
1286   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1287 
1288   Address starts_[kSize];
1289 };
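
// Example (illustrative sketch, not part of the original header): after
// allocating a code object, the skip list of its page records the first object
// of each 8KB region (1 << kRegionSizeLog2 bytes), so later scans can skip to
// it:
//
//   SkipList::Update(object->address(), object->Size());
//   // Later, when scanning the page near |addr|:
//   Address first_object = page->skip_list()->StartFor(addr);
//
// |object|, |page| and |addr| are placeholders; HeapObject::Size() is assumed
// to return the object size in bytes.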
1290 
1291 
1292 // ----------------------------------------------------------------------------
1293 // A space acquires chunks of memory from the operating system. The memory
1294 // allocator allocates and deallocates pages for the paged heap spaces and large
1295 // pages for large object space.
1296 class MemoryAllocator {
1297  public:
1298   // Unmapper takes care of concurrently unmapping and uncommitting memory
1299   // chunks.
1300   class Unmapper {
1301    public:
1302     class UnmapFreeMemoryTask;
1303 
1304     explicit Unmapper(MemoryAllocator* allocator)
1305         : allocator_(allocator),
1306           pending_unmapping_tasks_semaphore_(0),
1307           concurrent_unmapping_tasks_active_(0) {}
1308 
1309     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1310       if ((chunk->size() == Page::kPageSize) &&
1311           (chunk->executable() != EXECUTABLE)) {
1312         AddMemoryChunkSafe<kRegular>(chunk);
1313       } else {
1314         AddMemoryChunkSafe<kNonRegular>(chunk);
1315       }
1316     }
1317 
1318     MemoryChunk* TryGetPooledMemoryChunkSafe() {
1319       // Procedure:
1320       // (1) Try to get a chunk that was declared as pooled and already has
1321       // been uncommitted.
1322       // (2) Try to steal any memory chunk of kPageSize that would've been
1323       // unmapped.
1324       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1325       if (chunk == nullptr) {
1326         chunk = GetMemoryChunkSafe<kRegular>();
1327         if (chunk != nullptr) {
1328           // For stolen chunks we need to manually free any allocated memory.
1329           chunk->ReleaseAllocatedMemory();
1330         }
1331       }
1332       return chunk;
1333     }
1334 
1335     void FreeQueuedChunks();
1336     bool WaitUntilCompleted();
1337 
1338    private:
1339     enum ChunkQueueType {
1340       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
1341                     // can thus be used for stealing.
1342       kNonRegular,  // Large chunks and executable chunks.
1343       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
1344       kNumberOfChunkQueues,
1345     };
1346 
1347     template <ChunkQueueType type>
1348     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1349       base::LockGuard<base::Mutex> guard(&mutex_);
1350       if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
1351         chunks_[type].push_back(chunk);
1352       } else {
1353         DCHECK_EQ(type, kRegular);
1354         delayed_regular_chunks_.push_back(chunk);
1355       }
1356     }
1357 
1358     template <ChunkQueueType type>
1359     MemoryChunk* GetMemoryChunkSafe() {
1360       base::LockGuard<base::Mutex> guard(&mutex_);
1361       if (chunks_[type].empty()) return nullptr;
1362       MemoryChunk* chunk = chunks_[type].front();
1363       chunks_[type].pop_front();
1364       return chunk;
1365     }
1366 
1367     void ReconsiderDelayedChunks();
1368     void PerformFreeMemoryOnQueuedChunks();
1369 
1370     base::Mutex mutex_;
1371     MemoryAllocator* allocator_;
1372     std::list<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1373     // Delayed chunks cannot be processed in the current unmapping cycle because
1374     // of dependencies such as an active sweeper.
1375     // See MemoryAllocator::CanFreeMemoryChunk.
1376     std::list<MemoryChunk*> delayed_regular_chunks_;
1377     base::Semaphore pending_unmapping_tasks_semaphore_;
1378     intptr_t concurrent_unmapping_tasks_active_;
1379 
1380     friend class MemoryAllocator;
1381   };
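
  // Example (illustrative sketch, not part of the original header): freeing a
  // page through the unmapper queues it and defers the actual unmapping to a
  // background task, while pooled chunks can be taken back for reuse:
  //
  //   unmapper->AddMemoryChunkSafe(chunk);
  //   unmapper->FreeQueuedChunks();
  //   MemoryChunk* reused = unmapper->TryGetPooledMemoryChunkSafe();
  //
  // |unmapper| and |chunk| are placeholders for the MemoryAllocator's Unmapper
  // instance and a chunk that is no longer needed.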
1382 
1383   enum AllocationMode {
1384     kRegular,
1385     kPooled,
1386   };
1387   enum FreeMode {
1388     kFull,
1389     kPreFreeAndQueue,
1390     kPooledAndQueue,
1391   };
1392 
1393   explicit MemoryAllocator(Isolate* isolate);
1394 
1395   // Initializes its internal bookkeeping structures.
1396   // Max capacity of the total space and executable memory limit.
1397   bool SetUp(intptr_t max_capacity, intptr_t capacity_executable,
1398              intptr_t code_range_size);
1399 
1400   void TearDown();
1401 
1402   // Allocates a Page from the allocator. AllocationMode is used to indicate
1403   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1404   // should be tried first.
1405   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1406             typename SpaceType>
1407   Page* AllocatePage(intptr_t size, SpaceType* owner, Executability executable);
1408 
1409   LargePage* AllocateLargePage(intptr_t size, LargeObjectSpace* owner,
1410                                Executability executable);
1411 
1412   template <MemoryAllocator::FreeMode mode = kFull>
1413   void Free(MemoryChunk* chunk);
1414 
1415   bool CanFreeMemoryChunk(MemoryChunk* chunk);
1416 
1417   // Returns allocated spaces in bytes.
1418   intptr_t Size() { return size_.Value(); }
1419 
1420   // Returns allocated executable spaces in bytes.
1421   intptr_t SizeExecutable() { return size_executable_.Value(); }
1422 
1423   // Returns the maximum available bytes of heaps.
1424   intptr_t Available() {
1425     intptr_t size = Size();
1426     return capacity_ < size ? 0 : capacity_ - size;
1427   }
1428 
1429   // Returns the maximum available executable bytes of heaps.
1430   intptr_t AvailableExecutable() {
1431     intptr_t executable_size = SizeExecutable();
1432     if (capacity_executable_ < executable_size) return 0;
1433     return capacity_executable_ - executable_size;
1434   }
1435 
1436   // Returns maximum available bytes that the old space can have.
1437   intptr_t MaxAvailable() {
1438     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1439   }
1440 
1441   // Returns an indication of whether a pointer is in a space that has
1442   // been allocated by this MemoryAllocator.
1443   V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1444     return address < lowest_ever_allocated_.Value() ||
1445            address >= highest_ever_allocated_.Value();
1446   }
1447 
1448 #ifdef DEBUG
1449   // Reports statistic info of the space.
1450   void ReportStatistics();
1451 #endif
1452 
1453   // Returns a MemoryChunk in which the memory region from commit_area_size to
1454   // reserve_area_size of the chunk area is reserved but not committed; it
1455   // can be committed later by calling MemoryChunk::CommitArea.
1456   MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
1457                              intptr_t commit_area_size,
1458                              Executability executable, Space* space);
1459 
1460   Address ReserveAlignedMemory(size_t requested, size_t alignment,
1461                                base::VirtualMemory* controller);
1462   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1463                                 size_t alignment, Executability executable,
1464                                 base::VirtualMemory* controller);
1465 
1466   bool CommitMemory(Address addr, size_t size, Executability executable);
1467 
1468   void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1469   void FreeMemory(Address addr, size_t size, Executability executable);
1470 
1471   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1472   // the address is not NULL, the size is greater than zero, and that the
1473   // block is contained in the initial chunk.  Returns true if it succeeded
1474   // and false otherwise.
1475   bool CommitBlock(Address start, size_t size, Executability executable);
1476 
1477   // Uncommit a contiguous block of memory [start..(start+size)[.
1478   // start is not NULL, the size is greater than zero, and the
1479   // block is contained in the initial chunk.  Returns true if it succeeded
1480   // and false otherwise.
1481   bool UncommitBlock(Address start, size_t size);
1482 
1483   // Zaps a contiguous block of memory [start..(start+size)[ thus
1484   // filling it up with a recognizable non-NULL bit pattern.
1485   void ZapBlock(Address start, size_t size);
1486 
1487   static int CodePageGuardStartOffset();
1488 
1489   static int CodePageGuardSize();
1490 
1491   static int CodePageAreaStartOffset();
1492 
1493   static int CodePageAreaEndOffset();
1494 
1495   static int CodePageAreaSize() {
1496     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1497   }
1498 
1499   static int PageAreaSize(AllocationSpace space) {
1500     DCHECK_NE(LO_SPACE, space);
1501     return (space == CODE_SPACE) ? CodePageAreaSize()
1502                                  : Page::kAllocatableMemory;
1503   }
1504 
1505   MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1506                                               Address start, size_t commit_size,
1507                                               size_t reserved_size);
1508 
1509   CodeRange* code_range() { return code_range_; }
1510   Unmapper* unmapper() { return &unmapper_; }
1511 
1512  private:
1513   // PreFree logically frees the object, i.e., it takes care of the size
1514   // bookkeeping and calls the allocation callback.
1515   void PreFreeMemory(MemoryChunk* chunk);
1516 
1517   // FreeMemory can be called concurrently when PreFree was executed before.
1518   void PerformFreeMemory(MemoryChunk* chunk);
1519 
1520   // See AllocatePage for public interface. Note that currently we only support
1521   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1522   template <typename SpaceType>
1523   MemoryChunk* AllocatePagePooled(SpaceType* owner);
1524 
1525   Isolate* isolate_;
1526 
1527   CodeRange* code_range_;
1528 
1529   // Maximum space size in bytes.
1530   intptr_t capacity_;
1531   // Maximum subset of capacity_ that can be executable
1532   intptr_t capacity_executable_;
1533 
1534   // Allocated space size in bytes.
1535   base::AtomicNumber<intptr_t> size_;
1536   // Allocated executable space size in bytes.
1537   base::AtomicNumber<intptr_t> size_executable_;
1538 
1539   // We keep the lowest and highest addresses allocated as a quick way
1540   // of determining that pointers are outside the heap. The estimate is
1541   // conservative, i.e. not all addresses in 'allocated' space are allocated
1542   // to our heap. The range is [lowest, highest[, inclusive on the low end
1543   // and exclusive on the high end.
1544   base::AtomicValue<void*> lowest_ever_allocated_;
1545   base::AtomicValue<void*> highest_ever_allocated_;
1546 
1547   // Initializes pages in a chunk. Returns the first page address.
1548   // This function and GetChunkId() are provided for the mark-compact
1549   // collector to rebuild page headers in the from space, which is
1550   // used as a marking stack and its page headers are destroyed.
1551   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1552                                PagedSpace* owner);
1553 
1554   void UpdateAllocatedSpaceLimits(void* low, void* high) {
1555     // The use of atomic primitives does not guarantee correctness (wrt.
1556     // desired semantics) by default. The loop here ensures that we update the
1557     // values only if they did not change in between.
1558     void* ptr = nullptr;
1559     do {
1560       ptr = lowest_ever_allocated_.Value();
1561     } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1562     do {
1563       ptr = highest_ever_allocated_.Value();
1564     } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1565   }
1566 
1567   base::VirtualMemory last_chunk_;
1568   Unmapper unmapper_;
1569 
1570   friend class TestCodeRangeScope;
1571 
1572   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1573 };
1574 
1575 
1576 // -----------------------------------------------------------------------------
1577 // Interface for heap object iterator to be implemented by all object space
1578 // object iterators.
1579 //
1580 // NOTE: The space-specific object iterators also implement their own next()
1581 //       method, which is used to avoid virtual function calls when
1582 //       iterating a specific space.
1583 
1584 class ObjectIterator : public Malloced {
1585  public:
1586   virtual ~ObjectIterator() {}
1587   virtual HeapObject* Next() = 0;
1588 };
1589 
1590 template <class PAGE_TYPE>
1591 class PageIteratorImpl
1592     : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1593  public:
1594   explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1595   PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1596   PAGE_TYPE* operator*() { return p_; }
1597   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1598     return rhs.p_ == p_;
1599   }
1600   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1601     return rhs.p_ != p_;
1602   }
1603   inline PageIteratorImpl<PAGE_TYPE>& operator++();
1604   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1605 
1606  private:
1607   PAGE_TYPE* p_;
1608 };
1609 
1610 typedef PageIteratorImpl<Page> PageIterator;
1611 typedef PageIteratorImpl<LargePage> LargePageIterator;
1612 
1613 class PageRange {
1614  public:
1615   typedef PageIterator iterator;
1616   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1617   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1618   iterator begin() { return iterator(begin_); }
1619   iterator end() { return iterator(end_); }
1620 
1621  private:
1622   Page* begin_;
1623   Page* end_;
1624 };
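// Example (illustrative sketch; {page} and {space} are assumed): PageRange
// provides begin()/end(), so pages can be visited with a range-based for loop.
//
//   for (Page* p : PageRange(page)) { ... }  // just this one page
//   for (Page* p : PageRange(space->FirstPage(), space->anchor())) {
//     ...                                    // all pages of the space
//   }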
1625 
1626 // -----------------------------------------------------------------------------
1627 // Heap object iterator in new/old/map spaces.
1628 //
1629 // A HeapObjectIterator iterates objects from the bottom of the given space
1630 // to its top or from the bottom of the given page to its top.
1631 //
1632 // If objects are allocated in the page during iteration the iterator may
1633 // or may not iterate over those objects.  The caller must create a new
1634 // iterator in order to be sure to visit these new objects.
1635 class HeapObjectIterator : public ObjectIterator {
1636  public:
1637   // Creates a new object iterator in a given space.
1638   explicit HeapObjectIterator(PagedSpace* space);
1639   explicit HeapObjectIterator(Page* page);
1640 
1641   // Advance to the next object, skipping free spaces and other fillers and
1642   // skipping the special garbage section of which there is one per space.
1643   // Returns nullptr when the iteration has ended.
1644   inline HeapObject* Next() override;
1645 
1646  private:
1647   // Fast (inlined) path of next().
1648   inline HeapObject* FromCurrentPage();
1649 
1650   // Slow path of next(), goes into the next page.  Returns false if the
1651   // iteration has ended.
1652   bool AdvanceToNextPage();
1653 
1654   Address cur_addr_;  // Current iteration point.
1655   Address cur_end_;   // End iteration point.
1656   PagedSpace* space_;
1657   PageRange page_range_;
1658   PageRange::iterator current_page_;
1659 };
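// Example (illustrative sketch; {space} is an assumed PagedSpace*): visit every
// object in a paged space; Next() returns nullptr once iteration has ended.
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     ...
//   }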
1660 
1661 
1662 // -----------------------------------------------------------------------------
1663 // A space has a circular list of pages. The next page can be accessed via
1664 // Page::next_page() call.
1665 
1666 // An abstraction of allocation and relocation pointers in a page-structured
1667 // space.
1668 class AllocationInfo {
1669  public:
1670   AllocationInfo() : top_(nullptr), limit_(nullptr) {}
1671   AllocationInfo(Address top, Address limit) : top_(top), limit_(limit) {}
1672 
1673   void Reset(Address top, Address limit) {
1674     set_top(top);
1675     set_limit(limit);
1676   }
1677 
1678   INLINE(void set_top(Address top)) {
1679     SLOW_DCHECK(top == NULL ||
1680                 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1681     top_ = top;
1682   }
1683 
1684   INLINE(Address top()) const {
1685     SLOW_DCHECK(top_ == NULL ||
1686                 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1687     return top_;
1688   }
1689 
1690   Address* top_address() { return &top_; }
1691 
1692   INLINE(void set_limit(Address limit)) {
1693     limit_ = limit;
1694   }
1695 
1696   INLINE(Address limit()) const {
1697     return limit_;
1698   }
1699 
1700   Address* limit_address() { return &limit_; }
1701 
1702 #ifdef DEBUG
1703   bool VerifyPagedAllocation() {
1704     return (Page::FromAllocationAreaAddress(top_) ==
1705             Page::FromAllocationAreaAddress(limit_)) &&
1706            (top_ <= limit_);
1707   }
1708 #endif
1709 
1710  private:
1711   // Current allocation top.
1712   Address top_;
1713   // Current allocation limit.
1714   Address limit_;
1715 };
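// Example (illustrative sketch; {area_start}, {area_end} and {size} are
// assumed, with {size} word aligned): the bump-pointer allocation that the
// spaces below build on top of this class.
//
//   AllocationInfo info(area_start, area_end);
//   if (info.top() + size <= info.limit()) {
//     Address result = info.top();
//     info.set_top(info.top() + size);
//     // {result} now points at {size} uninitialized bytes.
//   }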
1716 
1717 
1718 // An abstraction of the accounting statistics of a page-structured space.
1719 //
1720 // The stats are only set by functions that ensure they stay balanced. These
1721 // functions increase or decrease one of the non-capacity stats in conjunction
1722 // with capacity, or else they always balance increases and decreases to the
1723 // non-capacity stats.
1724 class AllocationStats BASE_EMBEDDED {
1725  public:
1726   AllocationStats() { Clear(); }
1727 
1728   // Zero out all the allocation statistics (i.e., no capacity).
1729   void Clear() {
1730     capacity_ = 0;
1731     max_capacity_ = 0;
1732     size_ = 0;
1733   }
1734 
1735   void ClearSize() { size_ = capacity_; }
1736 
1737   // Accessors for the allocation statistics.
1738   intptr_t Capacity() { return capacity_; }
1739   intptr_t MaxCapacity() { return max_capacity_; }
1740   intptr_t Size() {
1741     CHECK_GE(size_, 0);
1742     return size_;
1743   }
1744 
1745   // Grow the space by adding available bytes.  They are initially marked as
1746   // being in use (part of the size), but will normally be immediately freed,
1747   // putting them on the free list and removing them from size_.
1748   void ExpandSpace(int size_in_bytes) {
1749     capacity_ += size_in_bytes;
1750     size_ += size_in_bytes;
1751     if (capacity_ > max_capacity_) {
1752       max_capacity_ = capacity_;
1753     }
1754     CHECK(size_ >= 0);
1755   }
1756 
1757   // Shrink the space by removing available bytes.  Since shrinking is done
1758   // during sweeping, bytes have been marked as being in use (part of the size)
1759   // and are hereby freed.
1760   void ShrinkSpace(int size_in_bytes) {
1761     capacity_ -= size_in_bytes;
1762     size_ -= size_in_bytes;
1763     CHECK_GE(size_, 0);
1764   }
1765 
1766   // Allocate from available bytes (available -> size).
1767   void AllocateBytes(intptr_t size_in_bytes) {
1768     size_ += size_in_bytes;
1769     CHECK_GE(size_, 0);
1770   }
1771 
1772   // Free allocated bytes, making them available (size -> available).
1773   void DeallocateBytes(intptr_t size_in_bytes) {
1774     size_ -= size_in_bytes;
1775     CHECK_GE(size_, 0);
1776   }
1777 
1778   // Merge {other} into {this}.
1779   void Merge(const AllocationStats& other) {
1780     capacity_ += other.capacity_;
1781     size_ += other.size_;
1782     if (other.max_capacity_ > max_capacity_) {
1783       max_capacity_ = other.max_capacity_;
1784     }
1785     CHECK_GE(size_, 0);
1786   }
1787 
1788   void DecreaseCapacity(intptr_t size_in_bytes) {
1789     capacity_ -= size_in_bytes;
1790     CHECK_GE(capacity_, 0);
1791     CHECK_GE(capacity_, size_);
1792   }
1793 
1794   void IncreaseCapacity(intptr_t size_in_bytes) { capacity_ += size_in_bytes; }
1795 
1796  private:
1797   // |capacity_|: The number of object-area bytes (i.e., not including page
1798   // bookkeeping structures) currently in the space.
1799   intptr_t capacity_;
1800 
1801   // |max_capacity_|: The maximum capacity ever observed.
1802   intptr_t max_capacity_;
1803 
1804   // |size_|: The number of allocated bytes.
1805   intptr_t size_;
1806 };
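// Example (illustrative sketch; {free_bytes} and {object_size} are assumed):
// callers keep the stats balanced by pairing capacity and size updates, so the
// intended invariant 0 <= Size() <= Capacity() holds throughout.
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);  // new page: capacity == size
//   stats.DeallocateBytes(free_bytes);            // area handed to free list
//   stats.AllocateBytes(object_size);             // later bump allocation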
1807 
1808 // A free list maintaining free blocks of memory. The free list is organized in
1809 // a way to encourage objects allocated around the same time to be near each
1810 // other. The normal way to allocate is intended to be by bumping a 'top'
1811 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1812 // find a new space to allocate from. This is done with the free list, which is
1813 // divided up into rough categories to cut down on waste. Having finer
1814 // categories would scatter allocation more.
1815 
1816 // The free list is organized in categories as follows:
1817 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1818 //   allocation when categories >= small do not have entries anymore.
1819 // 11-31 words (tiny): The tiny blocks are only used for allocation when
1820 //   categories >= small do not have entries anymore.
1821 // 32-255 words (small): Used for allocating free space between 1-31 words in
1822 //   size.
1823 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1824 //   in size.
1825 // 2048-16383 words (large): Used for allocating free space between 256-2047
1826 //   words in size.
1827 // At least 16384 words (huge): This list is for objects of 2048 words or
1828 //   larger. Empty pages are also added to this list.
1829 class FreeList {
1830  public:
1831   // This method returns how much memory can be allocated after freeing
1832   // maximum_freed memory.
1833   static inline int GuaranteedAllocatable(int maximum_freed) {
1834     if (maximum_freed <= kTiniestListMax) {
1835       // Since we are not iterating over all list entries, we cannot guarantee
1836       // that we can find the maximum freed block in that free list.
1837       return 0;
1838     } else if (maximum_freed <= kTinyListMax) {
1839       return kTinyAllocationMax;
1840     } else if (maximum_freed <= kSmallListMax) {
1841       return kSmallAllocationMax;
1842     } else if (maximum_freed <= kMediumListMax) {
1843       return kMediumAllocationMax;
1844     } else if (maximum_freed <= kLargeListMax) {
1845       return kLargeAllocationMax;
1846     }
1847     return maximum_freed;
1848   }
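  // Worked example (assuming a 64-bit build, i.e. kPointerSize == 8): freeing a
  // 1600 byte (200 word) block lands in the small list range
  // (<= kSmallListMax == 255 words), so GuaranteedAllocatable(1600) returns
  // kSmallAllocationMax == kTinyListMax == 248 bytes, the largest size that can
  // be guaranteed without scanning the list entries.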
1849 
1850   explicit FreeList(PagedSpace* owner);
1851 
1852   // Adds a node on the free list. The block of size {size_in_bytes} starting
1853   // at {start} is placed on the free list. The return value is the number of
1854   // bytes that were not added to the free list because the freed memory block
1855   // was too small. Bookkeeping information will be written to the block, i.e.,
1856   // its contents will be destroyed. The start address should be word aligned,
1857   // and the size should be a non-zero multiple of the word size.
1858   int Free(Address start, int size_in_bytes, FreeMode mode);
1859 
1860   // Allocate a block of size {size_in_bytes} from the free list. The block is
1861   // uninitialized. A failure is returned if no block is available. The size
1862   // should be a non-zero multiple of the word size.
1863   MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
1864 
1865   // Clear the free list.
1866   void Reset();
1867 
1868   void ResetStats() {
1869     wasted_bytes_.SetValue(0);
1870     ForAllFreeListCategories(
1871         [](FreeListCategory* category) { category->ResetStats(); });
1872   }
1873 
1874   // Return the number of bytes available on the free list.
1875   intptr_t Available() {
1876     intptr_t available = 0;
1877     ForAllFreeListCategories([&available](FreeListCategory* category) {
1878       available += category->available();
1879     });
1880     return available;
1881   }
1882 
1883   bool IsEmpty() {
1884     bool empty = true;
1885     ForAllFreeListCategories([&empty](FreeListCategory* category) {
1886       if (!category->is_empty()) empty = false;
1887     });
1888     return empty;
1889   }
1890 
1891   // Used after booting the VM.
1892   void RepairLists(Heap* heap);
1893 
1894   intptr_t EvictFreeListItems(Page* page);
1895   bool ContainsPageFreeListItems(Page* page);
1896 
1897   PagedSpace* owner() { return owner_; }
1898   intptr_t wasted_bytes() { return wasted_bytes_.Value(); }
1899 
1900   template <typename Callback>
1901   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1902     FreeListCategory* current = categories_[type];
1903     while (current != nullptr) {
1904       FreeListCategory* next = current->next();
1905       callback(current);
1906       current = next;
1907     }
1908   }
1909 
1910   template <typename Callback>
1911   void ForAllFreeListCategories(Callback callback) {
1912     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1913       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1914     }
1915   }
1916 
1917   bool AddCategory(FreeListCategory* category);
1918   void RemoveCategory(FreeListCategory* category);
1919   void PrintCategories(FreeListCategoryType type);
1920 
1921 #ifdef DEBUG
1922   intptr_t SumFreeLists();
1923   bool IsVeryLong();
1924 #endif
1925 
1926  private:
1927   class FreeListCategoryIterator {
1928    public:
1929     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1930         : current_(free_list->categories_[type]) {}
1931 
1932     bool HasNext() { return current_ != nullptr; }
1933 
1934     FreeListCategory* Next() {
1935       DCHECK(HasNext());
1936       FreeListCategory* tmp = current_;
1937       current_ = current_->next();
1938       return tmp;
1939     }
1940 
1941    private:
1942     FreeListCategory* current_;
1943   };
1944 
1945   // The size range of blocks, in bytes.
1946   static const int kMinBlockSize = 3 * kPointerSize;
1947   static const int kMaxBlockSize = Page::kAllocatableMemory;
1948 
1949   static const int kTiniestListMax = 0xa * kPointerSize;
1950   static const int kTinyListMax = 0x1f * kPointerSize;
1951   static const int kSmallListMax = 0xff * kPointerSize;
1952   static const int kMediumListMax = 0x7ff * kPointerSize;
1953   static const int kLargeListMax = 0x3fff * kPointerSize;
1954   static const int kTinyAllocationMax = kTiniestListMax;
1955   static const int kSmallAllocationMax = kTinyListMax;
1956   static const int kMediumAllocationMax = kSmallListMax;
1957   static const int kLargeAllocationMax = kMediumListMax;
1958 
1959   FreeSpace* FindNodeFor(int size_in_bytes, int* node_size);
1960 
1961   // Walks all available categories for a given |type| and tries to retrieve
1962   // a node. Returns nullptr if the category is empty.
1963   FreeSpace* FindNodeIn(FreeListCategoryType type, int* node_size);
1964 
1965   // Tries to retrieve a node from the first category in a given |type|.
1966   // Returns nullptr if the category is empty.
1967   FreeSpace* TryFindNodeIn(FreeListCategoryType type, int* node_size,
1968                            int minimum_size);
1969 
1970   // Searches a given |type| for a node of at least |minimum_size|.
1971   FreeSpace* SearchForNodeInList(FreeListCategoryType type, int* node_size,
1972                                  int minimum_size);
1973 
1974   FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1975     if (size_in_bytes <= kTiniestListMax) {
1976       return kTiniest;
1977     } else if (size_in_bytes <= kTinyListMax) {
1978       return kTiny;
1979     } else if (size_in_bytes <= kSmallListMax) {
1980       return kSmall;
1981     } else if (size_in_bytes <= kMediumListMax) {
1982       return kMedium;
1983     } else if (size_in_bytes <= kLargeListMax) {
1984       return kLarge;
1985     }
1986     return kHuge;
1987   }
1988 
1989   // The tiny categories are not used for fast allocation.
1990   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1991       size_t size_in_bytes) {
1992     if (size_in_bytes <= kSmallAllocationMax) {
1993       return kSmall;
1994     } else if (size_in_bytes <= kMediumAllocationMax) {
1995       return kMedium;
1996     } else if (size_in_bytes <= kLargeAllocationMax) {
1997       return kLarge;
1998     }
1999     return kHuge;
2000   }
2001 
2002   FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
2003 
2004   PagedSpace* owner_;
2005   base::AtomicNumber<intptr_t> wasted_bytes_;
2006   FreeListCategory* categories_[kNumberOfCategories];
2007 
2008   friend class FreeListCategory;
2009 
2010   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
2011 };
2012 
2013 
2014 class AllocationResult {
2015  public:
2016   // Implicit constructor from Object*.
2017   AllocationResult(Object* object)  // NOLINT
2018       : object_(object) {
2019     // AllocationResults can't return Smis, which are used to represent
2020     // failure and the space to retry in.
2021     CHECK(!object->IsSmi());
2022   }
2023 
2024   AllocationResult() : object_(Smi::FromInt(NEW_SPACE)) {}
2025 
2026   static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
2027     return AllocationResult(space);
2028   }
2029 
2030   inline bool IsRetry() { return object_->IsSmi(); }
2031 
2032   template <typename T>
2033   bool To(T** obj) {
2034     if (IsRetry()) return false;
2035     *obj = T::cast(object_);
2036     return true;
2037   }
2038 
2039   Object* ToObjectChecked() {
2040     CHECK(!IsRetry());
2041     return object_;
2042   }
2043 
2044   inline AllocationSpace RetrySpace();
2045 
2046  private:
2047   explicit AllocationResult(AllocationSpace space)
2048       : object_(Smi::FromInt(static_cast<int>(space))) {}
2049 
2050   Object* object_;
2051 };
2052 
2053 
2054 STATIC_ASSERT(sizeof(AllocationResult) == kPointerSize);
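// Example (illustrative sketch; {space} and {size} are assumed): the usual
// calling pattern is to attempt the allocation and, on retry, collect the
// space named by RetrySpace() before trying again.
//
//   AllocationResult result = space->AllocateRawUnaligned(size);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Allocation failed; result.RetrySpace() tells us where to collect.
//   }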
2055 
2056 
2057 // LocalAllocationBuffer represents a linear allocation area that is created
2058 // from a given {AllocationResult} and can be used to allocate memory without
2059 // synchronization.
2060 //
2061 // The buffer is properly closed upon destruction and reassignment.
2062 // Example:
2063 //   {
2064 //     AllocationResult result = ...;
2065 //     LocalAllocationBuffer a(heap, result, size);
2066 //     LocalAllocationBuffer b = a;
2067 //     CHECK(!a.IsValid());
2068 //     CHECK(b.IsValid());
2069 //     // {a} is invalid now and cannot be used for further allocations.
2070 //   }
2071 //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
2072 //   // filler object for the remaining area.
2073 class LocalAllocationBuffer {
2074  public:
2075   // Indicates that a buffer cannot be used for allocations anymore. Can result
2076   // from either reassigning a buffer, or trying to construct it from an
2077   // invalid {AllocationResult}.
2078   static inline LocalAllocationBuffer InvalidBuffer();
2079 
2080   // Creates a new LAB from a given {AllocationResult}. Results in
2081   // InvalidBuffer if the result indicates a retry.
2082   static inline LocalAllocationBuffer FromResult(Heap* heap,
2083                                                  AllocationResult result,
2084                                                  intptr_t size);
2085 
2086   ~LocalAllocationBuffer() { Close(); }
2087 
2088   // Convert to C++11 move-semantics once allowed by the style guide.
2089   LocalAllocationBuffer(const LocalAllocationBuffer& other);
2090   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
2091 
2092   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2093       int size_in_bytes, AllocationAlignment alignment);
2094 
2095   inline bool IsValid() { return allocation_info_.top() != nullptr; }
2096 
2097   // Try to merge LABs, which is only possible when they are adjacent in memory.
2098   // Returns true if the merge was successful, false otherwise.
2099   inline bool TryMerge(LocalAllocationBuffer* other);
2100 
2101  private:
2102   LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
2103 
2104   void Close();
2105 
2106   Heap* heap_;
2107   AllocationInfo allocation_info_;
2108 };
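// Example (illustrative sketch; {heap}, {kLabSize} and {object_size} are
// assumed): carve a LAB out of one synchronized allocation and then allocate
// from it without further synchronization.
//
//   AllocationResult raw = heap->new_space()->AllocateRawSynchronized(
//       kLabSize, kWordAligned);
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, raw, kLabSize);
//   if (lab.IsValid()) {
//     AllocationResult result = lab.AllocateRawAligned(object_size, kWordAligned);
//   }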
2109 
2110 class NewSpacePageRange {
2111  public:
2112   typedef PageRange::iterator iterator;
2113   inline NewSpacePageRange(Address start, Address limit);
2114   iterator begin() { return range_.begin(); }
2115   iterator end() { return range_.end(); }
2116 
2117  private:
2118   PageRange range_;
2119 };
2120 
2121 class PagedSpace : public Space {
2122  public:
2123   typedef PageIterator iterator;
2124 
2125   static const intptr_t kCompactionMemoryWanted = 500 * KB;
2126 
2127   // Creates a space with an id.
2128   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2129 
2130   ~PagedSpace() override { TearDown(); }
2131 
2132   // Set up the space using the given address range of virtual memory (from
2133   // the memory allocator's initial chunk) if possible.  If the block of
2134   // addresses is not big enough to contain a single page-aligned page, a
2135   // fresh chunk will be allocated.
2136   bool SetUp();
2137 
2138   // Returns true if the space has been successfully set up and not
2139   // subsequently torn down.
2140   bool HasBeenSetUp();
2141 
2142   // Checks whether an object/address is in this space.
2143   inline bool Contains(Address a);
2144   inline bool Contains(Object* o);
2145   bool ContainsSlow(Address addr);
2146 
2147   // Given an address occupied by a live object, return that object if it is
2148   // in this space, or a Smi if it is not.  The implementation iterates over
2149   // objects in the page containing the address, the cost is linear in the
2150   // number of objects in the page.  It may be slow.
2151   Object* FindObject(Address addr);
2152 
2153   // During boot the free_space_map is created, and afterwards we may need
2154   // to write it into the free list nodes that were already created.
2155   void RepairFreeListsAfterDeserialization();
2156 
2157   // Prepares for a mark-compact GC.
2158   void PrepareForMarkCompact();
2159 
2160   // Current capacity without growing (Size() + Available()).
2161   intptr_t Capacity() { return accounting_stats_.Capacity(); }
2162 
2163   // Approximate amount of physical memory committed for this space.
2164   size_t CommittedPhysicalMemory() override;
2165 
2166   void ResetFreeListStatistics();
2167 
2168   // Sets the capacity, the available space and the wasted space to zero.
2169   // The stats are rebuilt during sweeping by adding each page to the
2170   // capacity and the size when it is encountered.  As free spaces are
2171   // discovered during the sweeping they are subtracted from the size and added
2172   // to the available and wasted totals.
2173   void ClearStats() {
2174     accounting_stats_.ClearSize();
2175     free_list_.ResetStats();
2176     ResetFreeListStatistics();
2177   }
2178 
2179   // Available bytes without growing.  These are the bytes on the free list.
2180   // The bytes in the linear allocation area are not included in this total
2181   // because updating the stats would slow down allocation.  New pages are
2182   // immediately added to the free list so they show up here.
2183   intptr_t Available() override { return free_list_.Available(); }
2184 
2185   // Allocated bytes in this space.  Garbage bytes that were not found due to
2186   // concurrent sweeping are counted as being allocated!  The bytes in the
2187   // current linear allocation area (between top and limit) are also counted
2188   // here.
2189   intptr_t Size() override { return accounting_stats_.Size(); }
2190 
2191   // As size, but the bytes in lazily swept pages are estimated and the bytes
2192   // in the current linear allocation area are not included.
2193   intptr_t SizeOfObjects() override;
2194 
2195   // Wasted bytes in this space.  These are just the bytes that were thrown away
2196   // due to being too small to use for allocation.
2197   virtual intptr_t Waste() { return free_list_.wasted_bytes(); }
2198 
2199   // Returns the allocation pointer in this space.
2200   Address top() { return allocation_info_.top(); }
2201   Address limit() { return allocation_info_.limit(); }
2202 
2203   // The allocation top address.
2204   Address* allocation_top_address() { return allocation_info_.top_address(); }
2205 
2206   // The allocation limit address.
2207   Address* allocation_limit_address() {
2208     return allocation_info_.limit_address();
2209   }
2210 
2211   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2212 
2213   // Allocate the requested number of bytes in the space if possible, return a
2214   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2215   // to be manually updated later.
2216   MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
2217       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2218 
2219   MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2220       int size_in_bytes);
2221 
2222   // Allocate the requested number of bytes in the space double aligned if
2223   // possible, return a failure object if not.
2224   MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2225       int size_in_bytes, AllocationAlignment alignment);
2226 
2227   // Allocate the requested number of bytes in the space and consider allocation
2228   // alignment if needed.
2229   MUST_USE_RESULT inline AllocationResult AllocateRaw(
2230       int size_in_bytes, AllocationAlignment alignment);
2231 
2232   // Give a block of memory to the space's free list.  It might be added to
2233   // the free list or accounted as waste.
2234   // If add_to_freelist is false then just accounting stats are updated and
2235   // no attempt to add area to free list is made.
2236   int Free(Address start, int size_in_bytes) {
2237     int wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2238     accounting_stats_.DeallocateBytes(size_in_bytes);
2239     return size_in_bytes - wasted;
2240   }
2241 
2242   int UnaccountedFree(Address start, int size_in_bytes) {
2243     int wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2244     return size_in_bytes - wasted;
2245   }
2246 
2247   void ResetFreeList() { free_list_.Reset(); }
2248 
2249   // Set space allocation info.
2250   void SetTopAndLimit(Address top, Address limit) {
2251     DCHECK(top == limit ||
2252            Page::FromAddress(top) == Page::FromAddress(limit - 1));
2253     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2254     allocation_info_.Reset(top, limit);
2255   }
2256 
2257   // Empty space allocation info, returning unused area to free list.
2258   void EmptyAllocationInfo() {
2259     // Mark the old linear allocation area with a free space map so it can be
2260     // skipped when scanning the heap.
2261     int old_linear_size = static_cast<int>(limit() - top());
2262     Free(top(), old_linear_size);
2263     SetTopAndLimit(NULL, NULL);
2264   }
2265 
2266   void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
2267 
2268   void IncreaseCapacity(int size);
2269 
2270   // Releases an unused page and shrinks the space.
2271   void ReleasePage(Page* page);
2272 
2273   // The dummy page that anchors the linked list of pages.
2274   Page* anchor() { return &anchor_; }
2275 
2276   // Collect code size related statistics
2277   void CollectCodeStatistics();
2278 
2279   // Reset code size related statistics
2280   static void ResetCodeAndMetadataStatistics(Isolate* isolate);
2281 
2282 #ifdef VERIFY_HEAP
2283   // Verify integrity of this space.
2284   virtual void Verify(ObjectVisitor* visitor);
2285 
2286   // Overridden by subclasses to verify space-specific object
2287   // properties (e.g., only maps or free-list nodes are in map space).
2288   virtual void VerifyObject(HeapObject* obj) {}
2289 #endif
2290 
2291 #ifdef DEBUG
2292   // Print meta info and objects in this space.
2293   void Print() override;
2294 
2295   // Reports statistics for the space
2296   void ReportStatistics();
2297 
2298   // Report code object related statistics
2299   static void ReportCodeStatistics(Isolate* isolate);
2300   static void ResetCodeStatistics(Isolate* isolate);
2301 #endif
2302 
2303   Page* FirstPage() { return anchor_.next_page(); }
2304   Page* LastPage() { return anchor_.prev_page(); }
2305 
2306   void EvictEvacuationCandidatesFromLinearAllocationArea();
2307 
2308   bool CanExpand(size_t size);
2309 
2310   // Returns the number of total pages in this space.
2311   int CountTotalPages();
2312 
2313   // Return size of allocatable area on a page in this space.
2314   inline int AreaSize() { return area_size_; }
2315 
2316   virtual bool is_local() { return false; }
2317 
2318   // Merges {other} into the current space. Note that this modifies {other},
2319   // e.g., removes its bump pointer area and resets statistics.
2320   void MergeCompactionSpace(CompactionSpace* other);
2321 
2322   // Refills the free list from the corresponding free list filled by the
2323   // sweeper.
2324   virtual void RefillFreeList();
2325 
2326   FreeList* free_list() { return &free_list_; }
2327 
2328   base::Mutex* mutex() { return &space_mutex_; }
2329 
2330   inline void UnlinkFreeListCategories(Page* page);
2331   inline intptr_t RelinkFreeListCategories(Page* page);
2332 
2333   iterator begin() { return iterator(anchor_.next_page()); }
2334   iterator end() { return iterator(&anchor_); }
2335 
2336  protected:
2337   // PagedSpaces that should be included in snapshots have different, i.e.,
2338   // smaller, initial pages.
2339   virtual bool snapshotable() { return true; }
2340 
2341   bool HasPages() { return anchor_.next_page() != &anchor_; }
2342 
2343   // Cleans up the space, frees all pages in this space except those belonging
2344   // to the initial chunk, uncommits addresses in the initial chunk.
2345   void TearDown();
2346 
2347   // Expands the space by allocating a fixed number of pages. Returns false if
2348   // it cannot allocate requested number of pages from OS, or if the hard heap
2349   // size limit has been hit.
2350   bool Expand();
2351 
2352   // Generic fast case allocation function that tries linear allocation at the
2353   // address denoted by top in allocation_info_.
2354   inline HeapObject* AllocateLinearly(int size_in_bytes);
2355 
2356   // Generic fast case allocation function that tries aligned linear allocation
2357   // at the address denoted by top in allocation_info_. Writes the aligned
2358   // allocation size, which includes the filler size, to size_in_bytes.
2359   inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2360                                              AllocationAlignment alignment);
2361 
2362   // If sweeping is still in progress try to sweep unswept pages. If that is
2363   // not successful, wait for the sweeper threads and re-try free-list
2364   // allocation.
2365   MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2366       int size_in_bytes);
2367 
2368   // Slow path of AllocateRaw.  This function is space-dependent.
2369   MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2370 
2371   int area_size_;
2372 
2373   // Accounting information for this space.
2374   AllocationStats accounting_stats_;
2375 
2376   // The dummy page that anchors the double linked list of pages.
2377   Page anchor_;
2378 
2379   // The space's free list.
2380   FreeList free_list_;
2381 
2382   // Normal allocation information.
2383   AllocationInfo allocation_info_;
2384 
2385   // Mutex guarding any concurrent access to the space.
2386   base::Mutex space_mutex_;
2387 
2388   friend class IncrementalMarking;
2389   friend class MarkCompactCollector;
2390 
2391   // Used in cctest.
2392   friend class HeapTester;
2393 };
2394 
2395 
2396 class NumberAndSizeInfo BASE_EMBEDDED {
2397  public:
2398   NumberAndSizeInfo() : number_(0), bytes_(0) {}
2399 
2400   int number() const { return number_; }
2401   void increment_number(int num) { number_ += num; }
2402 
2403   int bytes() const { return bytes_; }
2404   void increment_bytes(int size) { bytes_ += size; }
2405 
2406   void clear() {
2407     number_ = 0;
2408     bytes_ = 0;
2409   }
2410 
2411  private:
2412   int number_;
2413   int bytes_;
2414 };
2415 
2416 
2417 // HistogramInfo class for recording a single "bar" of a histogram.  This
2418 // class is used for collecting statistics to print to the log file.
2419 class HistogramInfo : public NumberAndSizeInfo {
2420  public:
2421   HistogramInfo() : NumberAndSizeInfo() {}
2422 
2423   const char* name() { return name_; }
2424   void set_name(const char* name) { name_ = name; }
2425 
2426  private:
2427   const char* name_;
2428 };
2429 
2430 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2431 
2432 // -----------------------------------------------------------------------------
2433 // SemiSpace in young generation
2434 //
2435 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2436 // The mark-compact collector  uses the memory of the first page in the from
2437 // space as a marking stack when tracing live objects.
2438 class SemiSpace : public Space {
2439  public:
2440   typedef PageIterator iterator;
2441 
2442   static void Swap(SemiSpace* from, SemiSpace* to);
2443 
2444   SemiSpace(Heap* heap, SemiSpaceId semispace)
2445       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2446         current_capacity_(0),
2447         maximum_capacity_(0),
2448         minimum_capacity_(0),
2449         age_mark_(nullptr),
2450         committed_(false),
2451         id_(semispace),
2452         anchor_(this),
2453         current_page_(nullptr),
2454         pages_used_(0) {}
2455 
2456   inline bool Contains(HeapObject* o);
2457   inline bool Contains(Object* o);
2458   inline bool ContainsSlow(Address a);
2459 
2460   void SetUp(int initial_capacity, int maximum_capacity);
2461   void TearDown();
2462   bool HasBeenSetUp() { return maximum_capacity_ != 0; }
2463 
2464   bool Commit();
2465   bool Uncommit();
2466   bool is_committed() { return committed_; }
2467 
2468   // Grow the semispace to the new capacity.  The new capacity requested must
2469   // be larger than the current capacity and less than the maximum capacity.
2470   bool GrowTo(int new_capacity);
2471 
2472   // Shrinks the semispace to the new capacity.  The new capacity requested
2473   // must be more than the amount of used memory in the semispace and less
2474   // than the current capacity.
2475   bool ShrinkTo(int new_capacity);
2476 
2477   bool EnsureCurrentCapacity();
2478 
2479   // Returns the start address of the first page of the space.
2480   Address space_start() {
2481     DCHECK_NE(anchor_.next_page(), anchor());
2482     return anchor_.next_page()->area_start();
2483   }
2484 
2485   Page* first_page() { return anchor_.next_page(); }
2486   Page* current_page() { return current_page_; }
2487   int pages_used() { return pages_used_; }
2488 
2489   // Returns one past the end address of the space.
2490   Address space_end() { return anchor_.prev_page()->area_end(); }
2491 
2492   // Returns the start address of the current page of the space.
2493   Address page_low() { return current_page_->area_start(); }
2494 
2495   // Returns one past the end address of the current page of the space.
2496   Address page_high() { return current_page_->area_end(); }
2497 
2498   bool AdvancePage() {
2499     Page* next_page = current_page_->next_page();
2500     // We cannot expand if we reached the maximum number of pages already. Note
2501     // that we need to account for the next page already for this check as we
2502     // could potentially fill the whole page after advancing.
2503     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2504     if (next_page == anchor() || reached_max_pages) {
2505       return false;
2506     }
2507     current_page_ = next_page;
2508     pages_used_++;
2509     return true;
2510   }
2511 
2512   // Resets the space to using the first page.
2513   void Reset();
2514 
2515   void RemovePage(Page* page);
2516   void PrependPage(Page* page);
2517 
2518   // Age mark accessors.
2519   Address age_mark() { return age_mark_; }
2520   void set_age_mark(Address mark);
2521 
2522   // Returns the current capacity of the semispace.
2523   int current_capacity() { return current_capacity_; }
2524 
2525   // Returns the maximum capacity of the semispace.
2526   int maximum_capacity() { return maximum_capacity_; }
2527 
2528   // Returns the initial capacity of the semispace.
2529   int minimum_capacity() { return minimum_capacity_; }
2530 
2531   SemiSpaceId id() { return id_; }
2532 
2533   // Approximate amount of physical memory committed for this space.
2534   size_t CommittedPhysicalMemory() override;
2535 
2536   // If we don't have these here then SemiSpace will be abstract.  However
2537   // they should never be called:
2538 
2539   intptr_t Size() override {
2540     UNREACHABLE();
2541     return 0;
2542   }
2543 
2544   intptr_t SizeOfObjects() override { return Size(); }
2545 
2546   intptr_t Available() override {
2547     UNREACHABLE();
2548     return 0;
2549   }
2550 
2551 #ifdef DEBUG
2552   void Print() override;
2553   // Validate a range of addresses in a SemiSpace.
2554   // The "from" address must be on a page prior to the "to" address,
2555   // in the linked page order, or it must be earlier on the same page.
2556   static void AssertValidRange(Address from, Address to);
2557 #else
2558   // Do nothing.
2559   inline static void AssertValidRange(Address from, Address to) {}
2560 #endif
2561 
2562 #ifdef VERIFY_HEAP
2563   virtual void Verify();
2564 #endif
2565 
2566   iterator begin() { return iterator(anchor_.next_page()); }
2567   iterator end() { return iterator(anchor()); }
2568 
2569  private:
2570   void RewindPages(Page* start, int num_pages);
2571 
2572   inline Page* anchor() { return &anchor_; }
2573   inline int max_pages() { return current_capacity_ / Page::kPageSize; }
2574 
2575   // Copies the flags into the masked positions on all pages in the space.
2576   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2577 
2578   // The currently committed space capacity.
2579   int current_capacity_;
2580 
2581   // The maximum capacity that can be used by this space. A space cannot grow
2582   // beyond that size.
2583   int maximum_capacity_;
2584 
2585   // The minimum capacity for the space. A space cannot shrink below this size.
2586   int minimum_capacity_;
2587 
2588   // Used to govern object promotion during mark-compact collection.
2589   Address age_mark_;
2590 
2591   bool committed_;
2592   SemiSpaceId id_;
2593 
2594   Page anchor_;
2595   Page* current_page_;
2596   int pages_used_;
2597 
2598   friend class NewSpace;
2599   friend class SemiSpaceIterator;
2600 };
2601 
2602 
2603 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2604 // semispace of the heap's new space.  It iterates over the objects in the
2605 // semispace from a given start address (defaulting to the bottom of the
2606 // semispace) to the top of the semispace.  New objects allocated after the
2607 // iterator is created are not iterated.
2608 class SemiSpaceIterator : public ObjectIterator {
2609  public:
2610   // Create an iterator over the allocated objects in the given to-space.
2611   explicit SemiSpaceIterator(NewSpace* space);
2612 
2613   inline HeapObject* Next() override;
2614 
2615  private:
2616   void Initialize(Address start, Address end);
2617 
2618   // The current iteration point.
2619   Address current_;
2620   // The end of iteration.
2621   Address limit_;
2622 };
2623 
2624 // -----------------------------------------------------------------------------
2625 // The young generation space.
2626 //
2627 // The new space consists of a contiguous pair of semispaces.  It simply
2628 // forwards most functions to the appropriate semispace.
2629 
2630 class NewSpace : public Space {
2631  public:
2632   typedef PageIterator iterator;
2633 
2634   explicit NewSpace(Heap* heap)
2635       : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2636         to_space_(heap, kToSpace),
2637         from_space_(heap, kFromSpace),
2638         reservation_(),
2639         top_on_previous_step_(0),
2640         allocated_histogram_(nullptr),
2641         promoted_histogram_(nullptr) {}
2642 
2643   inline bool Contains(HeapObject* o);
2644   inline bool ContainsSlow(Address a);
2645   inline bool Contains(Object* o);
2646 
2647   bool SetUp(int initial_semispace_capacity, int max_semispace_capacity);
2648 
2649   // Tears down the space.  Heap memory was not allocated by the space, so it
2650   // is not deallocated here.
2651   void TearDown();
2652 
2653   // True if the space has been set up but not torn down.
2654   bool HasBeenSetUp() {
2655     return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2656   }
2657 
2658   // Flip the pair of spaces.
2659   void Flip();
2660 
2661   // Grow the capacity of the semispaces.  Assumes that they are not at
2662   // their maximum capacity.
2663   void Grow();
2664 
2665   // Shrink the capacity of the semispaces.
2666   void Shrink();
2667 
2668   // Return the allocated bytes in the active semispace.
2669   intptr_t Size() override {
2670     return to_space_.pages_used() * Page::kAllocatableMemory +
2671            static_cast<int>(top() - to_space_.page_low());
2672   }
2673 
2674   // The same, but returning an int.  We have to have the one that returns
2675   // intptr_t because it is inherited, but if we know we are dealing with the
2676   // new space, which can't get as big as the other spaces, then this is useful:
2677   int SizeAsInt() { return static_cast<int>(Size()); }
2678 
2679   // Return the allocatable capacity of a semispace.
2680   intptr_t Capacity() {
2681     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2682     return (to_space_.current_capacity() / Page::kPageSize) *
2683            Page::kAllocatableMemory;
2684   }
2685 
2686   // Return the current size of a semispace, allocatable and non-allocatable
2687   // memory.
2688   intptr_t TotalCapacity() {
2689     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2690     return to_space_.current_capacity();
2691   }
2692 
2693   // Committed memory for NewSpace is the committed memory of both semi-spaces
2694   // combined.
2695   intptr_t CommittedMemory() override {
2696     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2697   }
2698 
2699   intptr_t MaximumCommittedMemory() override {
2700     return from_space_.MaximumCommittedMemory() +
2701            to_space_.MaximumCommittedMemory();
2702   }
2703 
2704   // Approximate amount of physical memory committed for this space.
2705   size_t CommittedPhysicalMemory() override;
2706 
2707   // Return the available bytes without growing.
2708   intptr_t Available() override { return Capacity() - Size(); }
2709 
2710   size_t AllocatedSinceLastGC() {
2711     bool seen_age_mark = false;
2712     Address age_mark = to_space_.age_mark();
2713     Page* current_page = to_space_.first_page();
2714     Page* age_mark_page = Page::FromAddress(age_mark);
2715     Page* last_page = Page::FromAddress(top() - kPointerSize);
2716     if (age_mark_page == last_page) {
2717       if (top() - age_mark >= 0) {
2718         return top() - age_mark;
2719       }
2720       // Top was reset at some point, invalidating this metric.
2721       return 0;
2722     }
2723     while (current_page != last_page) {
2724       if (current_page == age_mark_page) {
2725         seen_age_mark = true;
2726         break;
2727       }
2728       current_page = current_page->next_page();
2729     }
2730     if (!seen_age_mark) {
2731       // Top was reset at some point, invalidating this metric.
2732       return 0;
2733     }
2734     intptr_t allocated = age_mark_page->area_end() - age_mark;
2735     DCHECK_EQ(current_page, age_mark_page);
2736     current_page = age_mark_page->next_page();
2737     while (current_page != last_page) {
2738       allocated += Page::kAllocatableMemory;
2739       current_page = current_page->next_page();
2740     }
2741     allocated += top() - current_page->area_start();
2742     DCHECK_LE(0, allocated);
2743     DCHECK_LE(allocated, Size());
2744     return static_cast<size_t>(allocated);
2745   }
2746 
2747   void MovePageFromSpaceToSpace(Page* page) {
2748     DCHECK(page->InFromSpace());
2749     from_space_.RemovePage(page);
2750     to_space_.PrependPage(page);
2751   }
2752 
2753   bool Rebalance();
2754 
2755   // Return the maximum capacity of a semispace.
2756   int MaximumCapacity() {
2757     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2758     return to_space_.maximum_capacity();
2759   }
2760 
2761   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2762 
2763   // Returns the initial capacity of a semispace.
2764   int InitialTotalCapacity() {
2765     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2766     return to_space_.minimum_capacity();
2767   }
2768 
2769   // Return the address of the allocation pointer in the active semispace.
2770   Address top() {
2771     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2772     return allocation_info_.top();
2773   }
2774 
2775   // Return the address of the allocation pointer limit in the active semispace.
2776   Address limit() {
2777     DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2778     return allocation_info_.limit();
2779   }
2780 
2781   // Return the address of the first object in the active semispace.
2782   Address bottom() { return to_space_.space_start(); }
2783 
2784   // Get the age mark of the inactive semispace.
2785   Address age_mark() { return from_space_.age_mark(); }
2786   // Set the age mark in the active semispace.
2787   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2788 
2789   // The allocation top and limit address.
2790   Address* allocation_top_address() { return allocation_info_.top_address(); }
2791 
2792   // The allocation limit address.
2793   Address* allocation_limit_address() {
2794     return allocation_info_.limit_address();
2795   }
2796 
2797   MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2798       int size_in_bytes, AllocationAlignment alignment));
2799 
2800   MUST_USE_RESULT INLINE(
2801       AllocationResult AllocateRawUnaligned(int size_in_bytes));
2802 
2803   MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2804       int size_in_bytes, AllocationAlignment alignment));
2805 
2806   MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2807       int size_in_bytes, AllocationAlignment alignment);
2808 
2809   // Reset the allocation pointer to the beginning of the active semispace.
2810   void ResetAllocationInfo();
2811 
2812   // When inline allocation stepping is active, either because of incremental
2813   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2814   // inline allocation every once in a while. This is done by setting
2815   // allocation_info_.limit to be lower than the actual limit and increasing
2816   // it in steps to guarantee that the observers are notified periodically.
2817   void UpdateInlineAllocationLimit(int size_in_bytes);
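  // Illustration with made-up numbers: with 16 KB of space remaining on the
  // current page and a smallest observer step of 1 KB, allocation_info_.limit
  // is first set only about 1 KB above top(); when the bump pointer reaches
  // that artificial limit, the observers are stepped and the limit is advanced
  // by roughly another step, until the real page limit is reached.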
2818 
2819   void DisableInlineAllocationSteps() {
2820     top_on_previous_step_ = 0;
2821     UpdateInlineAllocationLimit(0);
2822   }
2823 
2824   // Allows observation of inline allocation. The observer->Step() method gets
2825   // called after every step_size bytes have been allocated (approximately).
2826   // This works by adjusting the allocation limit to a lower value and adjusting
2827   // it after each step.
2828   void AddAllocationObserver(AllocationObserver* observer) override;
2829 
2830   void RemoveAllocationObserver(AllocationObserver* observer) override;
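  // A hedged sketch of an observer for the two methods above, assuming the
  // AllocationObserver::Step(int, Address, size_t) signature used by this
  // codebase; the class name, counter, and |new_space| are made up for
  // illustration:
  //
  //   class SampleCountingObserver : public AllocationObserver {
  //    public:
  //     explicit SampleCountingObserver(intptr_t step_size)
  //         : AllocationObserver(step_size) {}
  //     void Step(int bytes_allocated, Address soon_object,
  //               size_t size) override {
  //       samples_++;  // called roughly every step_size allocated bytes
  //     }
  //    private:
  //     int samples_ = 0;
  //   };
  //
  //   SampleCountingObserver observer(KB);
  //   new_space->AddAllocationObserver(&observer);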
2831 
2832   // Get the extent of the inactive semispace (for use as a marking stack,
2833   // or to zap it). Notice: space-addresses are not necessarily on the
2834   // same page, so FromSpaceStart() might be above FromSpaceEnd().
2835   Address FromSpacePageLow() { return from_space_.page_low(); }
2836   Address FromSpacePageHigh() { return from_space_.page_high(); }
2837   Address FromSpaceStart() { return from_space_.space_start(); }
2838   Address FromSpaceEnd() { return from_space_.space_end(); }
2839 
2840   // Get the extent of the active semispace's pages' memory.
2841   Address ToSpaceStart() { return to_space_.space_start(); }
2842   Address ToSpaceEnd() { return to_space_.space_end(); }
2843 
2844   inline bool ToSpaceContainsSlow(Address a);
2845   inline bool FromSpaceContainsSlow(Address a);
2846   inline bool ToSpaceContains(Object* o);
2847   inline bool FromSpaceContains(Object* o);
2848 
2849   // Try to switch the active semispace to a new, empty page.
2850   // Returns false if this isn't possible or reasonable (i.e., there
2851   // are no pages, or the current page is already empty), or true
2852   // if successful.
2853   bool AddFreshPage();
2854   bool AddFreshPageSynchronized();
2855 
2856 #ifdef VERIFY_HEAP
2857   // Verify the active semispace.
2858   virtual void Verify();
2859 #endif
2860 
2861 #ifdef DEBUG
2862   // Print the active semispace.
2863   void Print() override { to_space_.Print(); }
2864 #endif
2865 
2866   // Iterates the active semispace to collect statistics.
2867   void CollectStatistics();
2868   // Reports previously collected statistics of the active semispace.
2869   void ReportStatistics();
2870   // Clears previously collected statistics.
2871   void ClearHistograms();
2872 
2873   // Record the allocation or promotion of a heap object.  Note that we don't
2874   // record every single allocation, but only those that happen in the
2875   // to space during a scavenge GC.
2876   void RecordAllocation(HeapObject* obj);
2877   void RecordPromotion(HeapObject* obj);
2878 
2879   // Return whether the operation succeeded.
2880   bool CommitFromSpaceIfNeeded() {
2881     if (from_space_.is_committed()) return true;
2882     return from_space_.Commit();
2883   }
2884 
2885   bool UncommitFromSpace() {
2886     if (!from_space_.is_committed()) return true;
2887     return from_space_.Uncommit();
2888   }
2889 
2890   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2891 
2892   SemiSpace* active_space() { return &to_space_; }
2893 
2894   void PauseAllocationObservers() override;
2895   void ResumeAllocationObservers() override;
2896 
2897   iterator begin() { return to_space_.begin(); }
2898   iterator end() { return to_space_.end(); }
2899 
2900  private:
2901   // Update allocation info to match the current to-space page.
2902   void UpdateAllocationInfo();
2903 
2904   base::Mutex mutex_;
2905 
2906   // The semispaces.
2907   SemiSpace to_space_;
2908   SemiSpace from_space_;
2909   base::VirtualMemory reservation_;
2910 
2911   // Allocation pointer and limit for normal allocation and allocation during
2912   // mark-compact collection.
2913   AllocationInfo allocation_info_;
2914 
2915   Address top_on_previous_step_;
2916 
2917   HistogramInfo* allocated_histogram_;
2918   HistogramInfo* promoted_histogram_;
2919 
2920   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2921 
2922   // If we are doing inline allocation in steps, this method performs the 'step'
2923   // operation. top is the memory address of the bump pointer at the last
2924   // inline allocation (i.e. it determines the number of bytes actually
2925   // allocated since the last step.) new_top is the address of the bump pointer
2926   // where the next byte is going to be allocated from. top and new_top may be
2927   // different when we cross a page boundary or reset the space.
2928   void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2929                             size_t size);
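  // For example (hypothetical addresses): if the last step left the bump
  // pointer at 0x1000 and it has since advanced to 0x1400, InlineAllocationStep
  // is invoked with top == 0x1000 and new_top == 0x1400, so 0x400 bytes are
  // attributed to this step before the observers are notified.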
2930   intptr_t GetNextInlineAllocationStepSize();
2931   void StartNextInlineAllocationStep();
2932 
2933   friend class SemiSpaceIterator;
2934 };
2935 
2936 class PauseAllocationObserversScope {
2937  public:
2938   explicit PauseAllocationObserversScope(Heap* heap);
2939   ~PauseAllocationObserversScope();
2940 
2941  private:
2942   Heap* heap_;
2943   DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2944 };
2945 
2946 // -----------------------------------------------------------------------------
2947 // Compaction space that is used temporarily during compaction.
2948 
2949 class CompactionSpace : public PagedSpace {
2950  public:
2951   CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2952       : PagedSpace(heap, id, executable) {}
2953 
2954   bool is_local() override { return true; }
2955 
2956  protected:
2957   // The space is temporary and not included in any snapshots.
2958   bool snapshotable() override { return false; }
2959 
2960   MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2961       int size_in_bytes) override;
2962 };
2963 
2964 
2965 // A collection of |CompactionSpace|s used by a single compaction task.
2966 class CompactionSpaceCollection : public Malloced {
2967  public:
2968   explicit CompactionSpaceCollection(Heap* heap)
2969       : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2970         code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2971 
2972   CompactionSpace* Get(AllocationSpace space) {
2973     switch (space) {
2974       case OLD_SPACE:
2975         return &old_space_;
2976       case CODE_SPACE:
2977         return &code_space_;
2978       default:
2979         UNREACHABLE();
2980     }
2981     UNREACHABLE();
2982     return nullptr;
2983   }
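  // Hypothetical usage by a compaction task (names made up for illustration):
  //
  //   CompactionSpaceCollection* spaces = new CompactionSpaceCollection(heap);
  //   CompactionSpace* target = spaces->Get(OLD_SPACE);
  //   // ... evacuate live objects into |target|; the temporary space is
  //   // merged back into the owning heap space once the task finishes.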
2984 
2985  private:
2986   CompactionSpace old_space_;
2987   CompactionSpace code_space_;
2988 };
2989 
2990 
2991 // -----------------------------------------------------------------------------
2992 // Old object space (includes the old space of objects and code space)
2993 
2994 class OldSpace : public PagedSpace {
2995  public:
2996   // Creates an old space object. The constructor does not allocate pages
2997   // from OS.
2998   OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2999       : PagedSpace(heap, id, executable) {}
3000 };
3001 
3002 
3003 // For contiguous spaces, top should be in the space (or at the end) and limit
3004 // should be the end of the space.
3005 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
3006   SLOW_DCHECK((space).page_low() <= (info).top() &&   \
3007               (info).top() <= (space).page_high() &&  \
3008               (info).limit() <= (space).page_high())
3009 
3010 
3011 // -----------------------------------------------------------------------------
3012 // Old space for all map objects
3013 
3014 class MapSpace : public PagedSpace {
3015  public:
3016   // Creates a map space object.
3017   MapSpace(Heap* heap, AllocationSpace id)
3018       : PagedSpace(heap, id, NOT_EXECUTABLE) {}
3019 
3020   int RoundSizeDownToObjectAlignment(int size) override {
3021     if (base::bits::IsPowerOfTwo32(Map::kSize)) {
3022       return RoundDown(size, Map::kSize);
3023     } else {
3024       return (size / Map::kSize) * Map::kSize;
3025     }
3026   }
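  // Worked example with an assumed, architecture-dependent Map::kSize of
  // 88 bytes (not a power of two): RoundSizeDownToObjectAlignment(200) takes
  // the second branch and yields (200 / 88) * 88 == 176, i.e. the largest
  // multiple of the map size that fits in the given size.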
3027 
3028 #ifdef VERIFY_HEAP
3029   void VerifyObject(HeapObject* obj) override;
3030 #endif
3031 };
3032 
3033 
3034 // -----------------------------------------------------------------------------
3035 // Large objects ( > Page::kMaxRegularHeapObjectSize ) are allocated and
3036 // managed by the large object space. A large object is allocated from OS
3037 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
3038 // A large object always starts at offset Page::kObjectStartOffset within its
3038 // page. Large objects do not move during garbage collections.
3040 
3041 class LargeObjectSpace : public Space {
3042  public:
3043   typedef LargePageIterator iterator;
3044 
3045   LargeObjectSpace(Heap* heap, AllocationSpace id);
3046   virtual ~LargeObjectSpace();
3047 
3048   // Initializes internal data structures.
3049   bool SetUp();
3050 
3051   // Releases internal resources, frees objects in this space.
3052   void TearDown();
3053 
3054   static intptr_t ObjectSizeFor(intptr_t chunk_size) {
3055     if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
3056     return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
3057   }
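  // For instance (illustrative numbers, assuming a 4 KB Page::kPageSize and a
  // 32-byte Page::kObjectStartOffset): ObjectSizeFor(64 * KB) returns
  // 65536 - 4096 - 32 == 61408 bytes of usable object payload, while any
  // chunk_size at or below 4096 + 32 yields 0.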
3058 
3059   // Shared implementation of AllocateRaw, AllocateRawCode and
3060   // AllocateRawFixedArray.
3061   MUST_USE_RESULT AllocationResult
3062       AllocateRaw(int object_size, Executability executable);
3063 
3064   // Available bytes for objects in this space.
3065   inline intptr_t Available() override;
3066 
3067   intptr_t Size() override { return size_; }
3068 
3069   intptr_t SizeOfObjects() override { return objects_size_; }
3070 
3071   // Approximate amount of physical memory committed for this space.
3072   size_t CommittedPhysicalMemory() override;
3073 
3074   int PageCount() { return page_count_; }
3075 
3076   // Finds an object for a given address, returns a Smi if it is not found.
3077   // The function iterates through all objects in this space and may be slow.
3078   Object* FindObject(Address a);
3079 
3080   // Finds a large object page containing the given address, returns NULL
3081   // if such a page doesn't exist.
3082   LargePage* FindPage(Address a);
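  // A hedged lookup sketch (|lo_space| is a hypothetical pointer to this
  // space): FindObject() encodes "not found" as a Smi, so callers test the
  // result instead of comparing against NULL, e.g.
  //
  //   Object* result = lo_space->FindObject(addr);
  //   if (result->IsHeapObject()) {
  //     HeapObject* object = HeapObject::cast(result);
  //     // |addr| lies inside a large object starting at object->address().
  //   }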
3083 
3084   // Clears the marking state of live objects.
3085   void ClearMarkingStateOfLiveObjects();
3086 
3087   // Frees unmarked objects.
3088   void FreeUnmarkedObjects();
3089 
3090   // Checks whether a heap object is in this space; O(1).
3091   bool Contains(HeapObject* obj);
3092   // Checks whether an address is in the object area in this space. Iterates
3093   // all objects in the space. May be slow.
3094   bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
3095 
3096   // Checks whether the space is empty.
3097   bool IsEmpty() { return first_page_ == NULL; }
3098 
3099   void AdjustLiveBytes(int by) { objects_size_ += by; }
3100 
3101   LargePage* first_page() { return first_page_; }
3102 
3103   // Collect code statistics.
3104   void CollectCodeStatistics();
3105 
3106   iterator begin() { return iterator(first_page_); }
3107   iterator end() { return iterator(nullptr); }
3108 
3109 #ifdef VERIFY_HEAP
3110   virtual void Verify();
3111 #endif
3112 
3113 #ifdef DEBUG
3114   void Print() override;
3115   void ReportStatistics();
3116 #endif
3117 
3118  private:
3119   // The head of the linked list of large object chunks.
3120   LargePage* first_page_;
3121   intptr_t size_;          // allocated bytes
3122   int page_count_;         // number of chunks
3123   intptr_t objects_size_;  // size of objects
3124   // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
3125   base::HashMap chunk_map_;
3126 
3127   friend class LargeObjectIterator;
3128 };
3129 
3130 
3131 class LargeObjectIterator : public ObjectIterator {
3132  public:
3133   explicit LargeObjectIterator(LargeObjectSpace* space);
3134 
3135   HeapObject* Next() override;
3136 
3137  private:
3138   LargePage* current_;
3139 };
3140 
3141 // Iterates over the chunks (pages and large object pages) that can contain
3142 // pointers to new space or to evacuation candidates.
3143 class MemoryChunkIterator BASE_EMBEDDED {
3144  public:
3145   inline explicit MemoryChunkIterator(Heap* heap);
3146 
3147   // Return NULL when the iterator is done.
3148   inline MemoryChunk* next();
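  // Typical traversal (sketch): the iterator walks the paged spaces and the
  // large object pages in turn and signals completion with NULL, e.g.
  //
  //   MemoryChunkIterator it(heap);
  //   for (MemoryChunk* chunk = it.next(); chunk != NULL; chunk = it.next()) {
  //     // visit |chunk|
  //   }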
3149 
3150  private:
3151   enum State {
3152     kOldSpaceState,
3153     kMapState,
3154     kCodeState,
3155     kLargeObjectState,
3156     kFinishedState
3157   };
3158   Heap* heap_;
3159   State state_;
3160   PageIterator old_iterator_;
3161   PageIterator code_iterator_;
3162   PageIterator map_iterator_;
3163   LargePageIterator lo_iterator_;
3164 };
3165 
3166 #ifdef DEBUG
3167 struct CommentStatistic {
3168   const char* comment;
3169   int size;
3170   int count;
3171   void Clear() {
3172     comment = NULL;
3173     size = 0;
3174     count = 0;
3175   }
3176   // Must be small, since an iteration is used for lookup.
3177   static const int kMaxComments = 64;
3178 };
3179 #endif
3180 }  // namespace internal
3181 }  // namespace v8
3182 
3183 #endif  // V8_HEAP_SPACES_H_
3184