1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7 
8 #include <list>
9 #include <map>
10 #include <memory>
11 #include <unordered_map>
12 #include <unordered_set>
13 #include <vector>
14 
15 #include "src/allocation.h"
16 #include "src/base/atomic-utils.h"
17 #include "src/base/iterator.h"
18 #include "src/base/list.h"
19 #include "src/base/platform/mutex.h"
20 #include "src/cancelable-task.h"
21 #include "src/flags.h"
22 #include "src/globals.h"
23 #include "src/heap/heap.h"
24 #include "src/heap/invalidated-slots.h"
25 #include "src/heap/marking.h"
26 #include "src/objects.h"
27 #include "src/objects/map.h"
28 #include "src/utils.h"
29 
30 namespace v8 {
31 namespace internal {
32 
33 namespace heap {
34 class HeapTester;
35 class TestCodeRangeScope;
36 }  // namespace heap
37 
38 class AllocationObserver;
39 class CompactionSpace;
40 class CompactionSpaceCollection;
41 class FreeList;
42 class Isolate;
43 class LinearAllocationArea;
44 class LocalArrayBufferTracker;
45 class MemoryAllocator;
46 class MemoryChunk;
47 class Page;
48 class PagedSpace;
49 class SemiSpace;
50 class SkipList;
51 class SlotsBuffer;
52 class SlotSet;
53 class TypedSlotSet;
54 class Space;
55 
56 // -----------------------------------------------------------------------------
57 // Heap structures:
58 //
59 // A JS heap consists of a young generation, an old generation, and a large
60 // object space. The young generation is divided into two semispaces. A
61 // scavenger implements Cheney's copying algorithm. The old generation is
62 // separated into a map space and an old object space. The map space contains
63 // all (and only) map objects, the rest of old objects go into the old space.
64 // The old generation is collected by a mark-sweep-compact collector.
65 //
66 // The semispaces of the young generation are contiguous.  The old and map
67 // spaces consist of a list of pages. A page has a page header and an object
68 // area.
69 //
70 // There is a separate large object space for objects larger than
71 // kMaxRegularHeapObjectSize, so that they do not have to move during
72 // collection. The large object space is paged. Pages in large object space
73 // may be larger than the page size.
74 //
75 // A store-buffer based write barrier is used to keep track of intergenerational
76 // references.  See heap/store-buffer.h.
77 //
78 // During scavenges and mark-sweep collections we sometimes (after a store
79 // buffer overflow) iterate intergenerational pointers without decoding heap
80 // object maps, so if the page belongs to old space or large object space
81 // it is essential to guarantee that the page does not contain any
82 // garbage pointers to new space: every pointer aligned word which satisfies
83 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
84 // new space. Thus objects in old space and large object spaces should have a
85 // special layout (e.g. no bare integer fields). This requirement does not
86 // apply to map space which is iterated in a special fashion. However we still
87 // require pointer fields of dead maps to be cleaned.
88 //
89 // To enable lazy cleaning of old space pages we can mark chunks of the page
90 // as being garbage.  Garbage sections are marked with a special map.  These
91 // sections are skipped when scanning the page, even if we are otherwise
92 // scanning without regard for object boundaries.  Garbage sections are chained
93 // together to form a free list after a GC.  Garbage sections created outside
94 // of GCs by object truncation etc. may not be in the free list chain.  Very
95 // small free spaces are ignored, they need only be cleaned of bogus pointers
96 // into new space.
97 //
98 // Each page may have up to one special garbage section.  The start of this
99 // section is denoted by the top field in the space.  The end of the section
100 // is denoted by the limit field in the space.  This special garbage section
101 // is not marked with a free space map in the data.  The point of this section
102 // is to enable linear allocation without having to constantly update the byte
103 // array every time the top field is updated and a new object is created.  The
104 // special garbage section is not in the chain of garbage sections.
105 //
106 // Since the top and limit fields are in the space, not the page, only one page
107 // has a special garbage section, and if the top and limit are equal then there
108 // is no special garbage section.
109 
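// As a concrete illustration of the top/limit scheme described above, linear
// allocation in the special garbage section boils down to a bump-pointer
// check (a minimal sketch only; the real fast path also handles alignment,
// allocation observers and the slow path into the free list):
//
//   Address new_top = top + size_in_bytes;
//   if (new_top > limit) {
//     // Not enough room in the linear area; go through the free list instead.
//   } else {
//     Address object_start = top;
//     top = new_top;  // No free-space map has to be written for this bump.
//   }
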
110 // Some assertion macros used in the debugging mode.
111 
112 #define DCHECK_PAGE_ALIGNED(address) \
113   DCHECK((OffsetFrom(address) & kPageAlignmentMask) == 0)
114 
115 #define DCHECK_OBJECT_ALIGNED(address) \
116   DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
117 
118 #define DCHECK_OBJECT_SIZE(size) \
119   DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
120 
121 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
122   DCHECK((0 < size) && (size <= code_space->AreaSize()))
123 
124 #define DCHECK_PAGE_OFFSET(offset) \
125   DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
126 
127 enum FreeListCategoryType {
128   kTiniest,
129   kTiny,
130   kSmall,
131   kMedium,
132   kLarge,
133   kHuge,
134 
135   kFirstCategory = kTiniest,
136   kLastCategory = kHuge,
137   kNumberOfCategories = kLastCategory + 1,
138   kInvalidCategory
139 };
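
// The kFirstCategory/kNumberOfCategories aliases above allow code to loop
// over every category without naming the individual enumerators, e.g.
// (illustrative sketch; Page::ForAllFreeListCategories below does exactly
// this):
//
//   for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
//     FreeListCategoryType type = static_cast<FreeListCategoryType>(i);
//     // ... visit the category of the given |type| ...
//   }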
140 
141 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
142 
143 enum class SpaceAccountingMode { kSpaceAccounted, kSpaceUnaccounted };
144 
145 enum ExternalBackingStoreType {
146   kArrayBuffer,
147   kExternalString,
148   kNumTypes
149 };
150 
151 enum RememberedSetType {
152   OLD_TO_NEW,
153   OLD_TO_OLD,
154   NUMBER_OF_REMEMBERED_SET_TYPES = OLD_TO_OLD + 1
155 };
156 
157 // A free list category maintains a linked list of free memory blocks.
158 class FreeListCategory {
159  public:
160   FreeListCategory(FreeList* free_list, Page* page)
161       : free_list_(free_list),
162         page_(page),
163         type_(kInvalidCategory),
164         available_(0),
165         top_(nullptr),
166         prev_(nullptr),
167         next_(nullptr) {}
168 
169   void Initialize(FreeListCategoryType type) {
170     type_ = type;
171     available_ = 0;
172     top_ = nullptr;
173     prev_ = nullptr;
174     next_ = nullptr;
175   }
176 
177   void Reset();
178 
179   void ResetStats() { Reset(); }
180 
181   void RepairFreeList(Heap* heap);
182 
183   // Relinks the category into the currently owning free list. Requires that the
184   // category is currently unlinked.
185   void Relink();
186 
187   void Free(Address address, size_t size_in_bytes, FreeMode mode);
188 
189   // Performs a single try to pick a node of at least |minimum_size| from the
190   // category. Stores the actual size in |node_size|. Returns nullptr if no
191   // node is found.
192   FreeSpace* PickNodeFromList(size_t minimum_size, size_t* node_size);
193 
194   // Picks a node of at least |minimum_size| from the category. Stores the
195   // actual size in |node_size|. Returns nullptr if no node is found.
196   FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
197 
198   inline FreeList* owner();
199   inline Page* page() const { return page_; }
200   inline bool is_linked();
201   bool is_empty() { return top() == nullptr; }
202   size_t available() const { return available_; }
203 
204   void set_free_list(FreeList* free_list) { free_list_ = free_list; }
205 
206 #ifdef DEBUG
207   size_t SumFreeList();
208   int FreeListLength();
209 #endif
210 
211  private:
212   // For debug builds we accurately compute free lists lengths up until
213   // {kVeryLongFreeList} by manually walking the list.
214   static const int kVeryLongFreeList = 500;
215 
216   FreeSpace* top() { return top_; }
217   void set_top(FreeSpace* top) { top_ = top; }
218   FreeListCategory* prev() { return prev_; }
219   void set_prev(FreeListCategory* prev) { prev_ = prev; }
220   FreeListCategory* next() { return next_; }
221   void set_next(FreeListCategory* next) { next_ = next; }
222 
223   // This FreeListCategory is owned by the given free_list_.
224   FreeList* free_list_;
225 
226   // This FreeListCategory holds free list entries of the given page_.
227   Page* const page_;
228 
229   // |type_|: The type of this free list category.
230   FreeListCategoryType type_;
231 
232   // |available_|: Total available bytes in all blocks of this free list
233   // category.
234   size_t available_;
235 
236   // |top_|: Points to the top FreeSpace* in the free list category.
237   FreeSpace* top_;
238 
239   FreeListCategory* prev_;
240   FreeListCategory* next_;
241 
242   friend class FreeList;
243   friend class PagedSpace;
244 
245   DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListCategory);
246 };
247 
248 // MemoryChunk represents a memory region owned by a specific space.
249 // It is divided into the header and the body. Chunk start is always
250 // 1MB aligned. Start of the body is aligned so it can accommodate
251 // any heap object.
252 class MemoryChunk {
253  public:
254   // Use with std data structures.
255   struct Hasher {
256     size_t operator()(MemoryChunk* const chunk) const {
257       return reinterpret_cast<size_t>(chunk) >> kPageSizeBits;
258     }
259   };
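
  // With the hasher above, chunks can be used as keys in standard containers
  // that are keyed by page, e.g. (illustrative sketch):
  //
  //   std::unordered_set<MemoryChunk*, MemoryChunk::Hasher> chunks;
  //   chunks.insert(MemoryChunk::FromAddress(addr));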
260 
261   enum Flag {
262     NO_FLAGS = 0u,
263     IS_EXECUTABLE = 1u << 0,
264     POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
265     POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
266     // A page in new space has one of the next two flags set.
267     IN_FROM_SPACE = 1u << 3,
268     IN_TO_SPACE = 1u << 4,
269     NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
270     EVACUATION_CANDIDATE = 1u << 6,
271     NEVER_EVACUATE = 1u << 7,
272 
273     // Large objects can have a progress bar in their page header. These objects
274     // are scanned in increments and will be kept black while being scanned.
275     // Even if the mutator writes to them they will be kept black and a white
276     // to grey transition is performed in the value.
277     HAS_PROGRESS_BAR = 1u << 8,
278 
279     // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
280     // from new to old space during evacuation.
281     PAGE_NEW_OLD_PROMOTION = 1u << 9,
282 
283     // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
284     // within the new space during evacuation.
285     PAGE_NEW_NEW_PROMOTION = 1u << 10,
286 
287     // This flag is intended to be used for testing. Works only when both
288     // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
289     // are set. It forces the page to become an evacuation candidate at next
290     // candidates selection cycle.
291     FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
292 
293     // This flag is intended to be used for testing.
294     NEVER_ALLOCATE_ON_PAGE = 1u << 12,
295 
296     // The memory chunk is already logically freed, however the actual freeing
297     // still has to be performed.
298     PRE_FREED = 1u << 13,
299 
300     // |POOLED|: When actually freeing this chunk, only uncommit and do not
301     // give up the reservation as we still reuse the chunk at some point.
302     POOLED = 1u << 14,
303 
304     // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
305     //   has been aborted and needs special handling by the sweeper.
306     COMPACTION_WAS_ABORTED = 1u << 15,
307 
308     // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
309     // on pages is sometimes aborted. The flag is used to avoid repeatedly
310     // triggering on the same page.
311     COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
312 
313     // |SWEEP_TO_ITERATE|: The page requires sweeping using external markbits
314     // to iterate the page.
315     SWEEP_TO_ITERATE = 1u << 17,
316 
317     // |INCREMENTAL_MARKING|: Indicates whether incremental marking is currently
318     // enabled.
319     INCREMENTAL_MARKING = 1u << 18
320   };
321 
322   using Flags = uintptr_t;
323 
324   static const Flags kPointersToHereAreInterestingMask =
325       POINTERS_TO_HERE_ARE_INTERESTING;
326 
327   static const Flags kPointersFromHereAreInterestingMask =
328       POINTERS_FROM_HERE_ARE_INTERESTING;
329 
330   static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
331 
332   static const Flags kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
333 
334   static const Flags kSkipEvacuationSlotsRecordingMask =
335       kEvacuationCandidateMask | kIsInNewSpaceMask;
336 
337   // |kSweepingDone|: The page state when sweeping is complete or sweeping must
338   //   not be performed on that page. Sweeper threads that are done with their
339   //   work will set this value and not touch the page anymore.
340   // |kSweepingPending|: This page is ready for parallel sweeping.
341   // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
342   enum ConcurrentSweepingState {
343     kSweepingDone,
344     kSweepingPending,
345     kSweepingInProgress,
346   };
347 
348   static const intptr_t kAlignment =
349       (static_cast<uintptr_t>(1) << kPageSizeBits);
350 
351   static const intptr_t kAlignmentMask = kAlignment - 1;
352 
353   static const intptr_t kSizeOffset = 0;
354   static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
355   static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
356   static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
357   static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
358   static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
359 
360   static const size_t kMinHeaderSize =
361       kSizeOffset         // NOLINT
362       + kSizetSize        // size_t size
363       + kUIntptrSize      // uintptr_t flags_
364       + kPointerSize      // Address area_start_
365       + kPointerSize      // Address area_end_
366       + 2 * kPointerSize  // VirtualMemory reservation_
367       + kPointerSize      // Address owner_
368       + kPointerSize      // Heap* heap_
369       + kIntptrSize       // intptr_t progress_bar_
370       + kIntptrSize       // std::atomic<intptr_t> live_byte_count_
371       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // SlotSet* array
372       + kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES  // TypedSlotSet* array
373       + kPointerSize  // InvalidatedSlots* invalidated_slots_
374       + kPointerSize  // SkipList* skip_list_
375       + kPointerSize  // std::atomic<intptr_t> high_water_mark_
376       + kPointerSize  // base::Mutex* mutex_
377       +
378       kPointerSize  // std::atomic<ConcurrentSweepingState> concurrent_sweeping_
379       + kPointerSize  // base::Mutex* page_protection_change_mutex_
380       + kPointerSize  // uintptr_t write_unprotect_counter_
381       + kSizetSize * kNumTypes
382       // std::atomic<size_t> external_backing_store_bytes_
383       + kSizetSize        // size_t allocated_bytes_
384       + kSizetSize        // size_t wasted_memory_
385       + kPointerSize * 2  // base::ListNode
386       + kPointerSize * kNumberOfCategories
387       // FreeListCategory categories_[kNumberOfCategories]
388       + kPointerSize  // LocalArrayBufferTracker* local_tracker_
389       + kIntptrSize   // std::atomic<intptr_t> young_generation_live_byte_count_
390       + kPointerSize;  // Bitmap* young_generation_bitmap_
391 
392   // We add some more space to the computed header size to account for missing
393   // alignment requirements in our computation.
394   // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
395   static const size_t kHeaderSize = kMinHeaderSize;
396 
397   static const int kBodyOffset =
398       CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
399 
400   // The start offset of the object area in a page. Aligned to both maps and
401   // code alignment to be suitable for both.  Also aligned to 32 words because
402   // the marking bitmap is arranged in 32 bit chunks.
403   static const int kObjectStartAlignment = 32 * kPointerSize;
404   static const int kObjectStartOffset =
405       kBodyOffset - 1 +
406       (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
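  // The expression above rounds kBodyOffset up to the next multiple of
  // kObjectStartAlignment (and leaves it unchanged if it is already aligned):
  // for an alignment A and a positive x, x - 1 + (A - (x - 1) % A) is the
  // smallest multiple of A that is >= x.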
407 
408   // Page size in bytes.  This must be a multiple of the OS page size.
409   static const int kPageSize = 1 << kPageSizeBits;
410 
411   static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
412 
413   // Maximum number of nested code memory modification scopes.
414   // TODO(6792,mstarzinger): Drop to 3 or lower once WebAssembly is off heap.
415   static const int kMaxWriteUnprotectCounter = 4;
416 
417   // Only works if the pointer is in the first kPageSize of the MemoryChunk.
418   static MemoryChunk* FromAddress(Address a) {
419     return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
420   }
421   // Only works if the object is in the first kPageSize of the MemoryChunk.
422   static MemoryChunk* FromHeapObject(const HeapObject* o) {
423     return reinterpret_cast<MemoryChunk*>(reinterpret_cast<Address>(o) &
424                                           ~kAlignmentMask);
425   }
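
  // Both FromAddress and FromHeapObject rely on chunks starting at
  // kAlignment-aligned addresses: clearing the low kPageSizeBits bits of any
  // interior pointer (via ~kAlignmentMask) recovers the chunk header.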
426 
427   void SetOldGenerationPageFlags(bool is_marking);
428   void SetYoungGenerationPageFlags(bool is_marking);
429 
430   static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
431 
432   static inline void UpdateHighWaterMark(Address mark) {
433     if (mark == kNullAddress) return;
434     // Need to subtract one from the mark because when a chunk is full the
435     // top points to the next address after the chunk, which effectively belongs
436     // to another chunk. See the comment to Page::FromTopOrLimit.
437     MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
438     intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
439     intptr_t old_mark = 0;
440     do {
441       old_mark = chunk->high_water_mark_;
442     } while (
443         (new_mark > old_mark) &&
444         !chunk->high_water_mark_.compare_exchange_weak(old_mark, new_mark));
445   }
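
  // The compare-and-swap loop above only ever moves the mark upwards: it
  // retries while |new_mark| is still larger than the currently stored value
  // and gives up as soon as another thread has published an even higher mark.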
446 
447   Address address() const {
448     return reinterpret_cast<Address>(const_cast<MemoryChunk*>(this));
449   }
450 
451   base::Mutex* mutex() { return mutex_; }
452 
453   bool Contains(Address addr) {
454     return addr >= area_start() && addr < area_end();
455   }
456 
457   // Checks whether |addr| can be a limit of addresses in this page. It's a
458   // limit if it's in the page, or if it's just after the last byte of the page.
459   bool ContainsLimit(Address addr) {
460     return addr >= area_start() && addr <= area_end();
461   }
462 
463   void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
464     concurrent_sweeping_ = state;
465   }
466 
467   ConcurrentSweepingState concurrent_sweeping_state() {
468     return static_cast<ConcurrentSweepingState>(concurrent_sweeping_.load());
469   }
470 
471   bool SweepingDone() { return concurrent_sweeping_ == kSweepingDone; }
472 
473   size_t size() const { return size_; }
474   void set_size(size_t size) { size_ = size; }
475 
476   inline Heap* heap() const { return heap_; }
477 
478   Heap* synchronized_heap();
479 
480   inline SkipList* skip_list() { return skip_list_; }
481 
482   inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
483 
484   template <RememberedSetType type>
485   bool ContainsSlots() {
486     return slot_set<type>() != nullptr || typed_slot_set<type>() != nullptr ||
487            invalidated_slots() != nullptr;
488   }
489 
490   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
491   SlotSet* slot_set() {
492     if (access_mode == AccessMode::ATOMIC)
493       return base::AsAtomicPointer::Acquire_Load(&slot_set_[type]);
494     return slot_set_[type];
495   }
496 
497   template <RememberedSetType type, AccessMode access_mode = AccessMode::ATOMIC>
498   TypedSlotSet* typed_slot_set() {
499     if (access_mode == AccessMode::ATOMIC)
500       return base::AsAtomicPointer::Acquire_Load(&typed_slot_set_[type]);
501     return typed_slot_set_[type];
502   }
503 
504   template <RememberedSetType type>
505   SlotSet* AllocateSlotSet();
506   // Not safe to be called concurrently.
507   template <RememberedSetType type>
508   void ReleaseSlotSet();
509   template <RememberedSetType type>
510   TypedSlotSet* AllocateTypedSlotSet();
511   // Not safe to be called concurrently.
512   template <RememberedSetType type>
513   void ReleaseTypedSlotSet();
514 
515   InvalidatedSlots* AllocateInvalidatedSlots();
516   void ReleaseInvalidatedSlots();
517   void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
518   // Updates invalidated_slots after array left-trimming.
519   void MoveObjectWithInvalidatedSlots(HeapObject* old_start,
520                                       HeapObject* new_start);
521   InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
522 
523   void ReleaseLocalTracker();
524 
525   void AllocateYoungGenerationBitmap();
526   void ReleaseYoungGenerationBitmap();
527 
528   Address area_start() { return area_start_; }
529   Address area_end() { return area_end_; }
530   size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
531 
532   // Approximate amount of physical memory committed for this chunk.
533   size_t CommittedPhysicalMemory();
534 
535   Address HighWaterMark() { return address() + high_water_mark_; }
536 
537   int progress_bar() {
538     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
539     return static_cast<int>(progress_bar_);
540   }
541 
542   void set_progress_bar(int progress_bar) {
543     DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
544     progress_bar_ = progress_bar;
545   }
546 
547   void ResetProgressBar() {
548     if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
549       set_progress_bar(0);
550     }
551   }
552 
553   void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
554                                           size_t amount);
555   void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
556                                           size_t amount);
557   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) {
558     return external_backing_store_bytes_[type];
559   }
560 
561   inline uint32_t AddressToMarkbitIndex(Address addr) const {
562     return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
563   }
564 
565   inline Address MarkbitIndexToAddress(uint32_t index) const {
566     return this->address() + (index << kPointerSizeLog2);
567   }
568 
569   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
570   void SetFlag(Flag flag) {
571     if (access_mode == AccessMode::NON_ATOMIC) {
572       flags_ |= flag;
573     } else {
574       base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
575     }
576   }
577 
578   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
579   bool IsFlagSet(Flag flag) {
580     return (GetFlags<access_mode>() & flag) != 0;
581   }
582 
583   void ClearFlag(Flag flag) { flags_ &= ~flag; }
584   // Set or clear multiple flags at a time. The flags in the mask are set to
585   // the value in "flags", the rest retain the current value in |flags_|.
586   void SetFlags(uintptr_t flags, uintptr_t mask) {
587     flags_ = (flags_ & ~mask) | (flags & mask);
588   }
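
  // For example (an illustrative sketch, not a quote from the callers),
  // copying the flip-relevant flags from one page to another could be written
  // as:
  //
  //   new_page->SetFlags(old_page->GetFlags(), Page::kCopyOnFlipFlagsMask);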
589 
590   // Return all current flags.
591   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
592   uintptr_t GetFlags() {
593     if (access_mode == AccessMode::NON_ATOMIC) {
594       return flags_;
595     } else {
596       return base::AsAtomicWord::Relaxed_Load(&flags_);
597     }
598   }
599 
600   bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
601 
602   void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
603 
604   bool CanAllocate() {
605     return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
606   }
607 
608   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
609   bool IsEvacuationCandidate() {
610     DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
611              IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
612     return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
613   }
614 
615   template <AccessMode access_mode = AccessMode::NON_ATOMIC>
616   bool ShouldSkipEvacuationSlotRecording() {
617     uintptr_t flags = GetFlags<access_mode>();
618     return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
619            ((flags & COMPACTION_WAS_ABORTED) == 0);
620   }
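
  // In other words: slot recording can be skipped for pages that will be
  // evacuated anyway (evacuation candidates and new-space pages), unless
  // compaction on the page was aborted and its slots are still needed.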
621 
622   Executability executable() {
623     return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
624   }
625 
626   bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
627 
628   bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
629 
630   bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
631 
632   bool InOldSpace() const;
633 
634   bool InLargeObjectSpace() const;
635 
636   Space* owner() const { return owner_; }
637 
638   void set_owner(Space* space) { owner_ = space; }
639 
640   bool IsPagedSpace() const;
641 
642   // Emits a memory barrier. For TSAN builds the other thread needs to perform
643   // MemoryChunk::synchronized_heap() to simulate the barrier.
644   void InitializationMemoryFence();
645 
646   void SetReadAndExecutable();
647   void SetReadAndWritable();
648 
649   base::ListNode<MemoryChunk>& list_node() { return list_node_; }
650 
651  protected:
652   static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
653                                  Address area_start, Address area_end,
654                                  Executability executable, Space* owner,
655                                  VirtualMemory* reservation);
656 
657   // Should be called when memory chunk is about to be freed.
658   void ReleaseAllocatedMemory();
659 
660   VirtualMemory* reserved_memory() { return &reservation_; }
661 
662   size_t size_;
663   uintptr_t flags_;
664 
665   // Start and end of allocatable memory on this chunk.
666   Address area_start_;
667   Address area_end_;
668 
669   // If the chunk needs to remember its memory reservation, it is stored here.
670   VirtualMemory reservation_;
671 
672   // The space owning this memory chunk.
673   std::atomic<Space*> owner_;
674 
675   Heap* heap_;
676 
677   // Used by the incremental marker to keep track of the scanning progress in
678   // large objects that have a progress bar and are scanned in increments.
679   intptr_t progress_bar_;
680 
681   // Count of bytes marked black on page.
682   std::atomic<intptr_t> live_byte_count_;
683 
684   // A single slot set for small pages (of size kPageSize) or an array of slot
685   // sets for large pages. In the latter case the number of entries in the array
686   // is ceil(size() / kPageSize).
687   SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
688   TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
689   InvalidatedSlots* invalidated_slots_;
690 
691   SkipList* skip_list_;
692 
693   // Assuming the initial allocation on a page is sequential,
694   // count highest number of bytes ever allocated on the page.
695   std::atomic<intptr_t> high_water_mark_;
696 
697   base::Mutex* mutex_;
698 
699   std::atomic<intptr_t> concurrent_sweeping_;
700 
701   base::Mutex* page_protection_change_mutex_;
702 
703   // This field is only relevant for code pages. It depicts the number of
704   // times a component requested this page to be read+writeable. The
705   // counter is decremented when a component resets to read+executable.
706   // If Value() == 0 => The memory is read and executable.
707   // If Value() >= 1 => The Memory is read and writable (and maybe executable).
708   // The maximum value is limited by {kMaxWriteUnprotectCounter} to prevent
709   // excessive nesting of scopes.
710   // All executable MemoryChunks are allocated rw based on the assumption that
711   // they will be used immediately for an allocation. They are initialized
712   // with the number of open CodeSpaceMemoryModificationScopes. The caller
713   // that triggers the page allocation is responsible for decrementing the
714   // counter.
715   uintptr_t write_unprotect_counter_;
716 
717   // Bytes allocated on the page, which include all objects on the page
718   // and the linear allocation area.
719   size_t allocated_bytes_;
720 
721   // Tracks off-heap memory used by this memory chunk.
722   std::atomic<size_t> external_backing_store_bytes_[kNumTypes];
723 
724   // Freed memory that was not added to the free list.
725   size_t wasted_memory_;
726 
727   base::ListNode<MemoryChunk> list_node_;
728 
729   FreeListCategory* categories_[kNumberOfCategories];
730 
731   LocalArrayBufferTracker* local_tracker_;
732 
733   std::atomic<intptr_t> young_generation_live_byte_count_;
734   Bitmap* young_generation_bitmap_;
735 
736  private:
737   void InitializeReservedMemory() { reservation_.Reset(); }
738 
739   friend class ConcurrentMarkingState;
740   friend class IncrementalMarkingState;
741   friend class MajorAtomicMarkingState;
742   friend class MajorMarkingState;
743   friend class MajorNonAtomicMarkingState;
744   friend class MemoryAllocator;
745   friend class MemoryChunkValidator;
746   friend class MinorMarkingState;
747   friend class MinorNonAtomicMarkingState;
748   friend class PagedSpace;
749 };
750 
751 static_assert(sizeof(std::atomic<intptr_t>) == kPointerSize,
752               "sizeof(std::atomic<intptr_t>) == kPointerSize");
753 
754 static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
755               "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
756 
757 
758 // -----------------------------------------------------------------------------
759 // A page is a memory chunk of a size 512K. Large object pages may be larger.
760 //
761 // The only way to get a page pointer is by calling factory methods:
762 //   Page* p = Page::FromAddress(addr); or
763 //   Page* p = Page::FromTopOrLimit(top);
764 class Page : public MemoryChunk {
765  public:
766   static const intptr_t kCopyAllFlags = ~0;
767 
768   // Page flags copied from from-space to to-space when flipping semispaces.
769   static const intptr_t kCopyOnFlipFlagsMask =
770       static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
771       static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
772       static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
773 
774   // Returns the page containing a given address. The address ranges
775   // from [page_addr .. page_addr + kPageSize[. This only works if the object
776   // is in fact in a page.
777   static Page* FromAddress(Address addr) {
778     return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
779   }
780   static Page* FromHeapObject(const HeapObject* o) {
781     return reinterpret_cast<Page*>(reinterpret_cast<Address>(o) &
782                                    ~kAlignmentMask);
783   }
784 
785   // Returns the page containing the address provided. The address can
786   // potentially point right after the page. To also be safe for tagged values
787   // we subtract a hole word. The valid address ranges from
788   // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
789   static Page* FromAllocationAreaAddress(Address address) {
790     return Page::FromAddress(address - kPointerSize);
791   }
792 
793   // Checks if address1 and address2 are on the same new space page.
794   static bool OnSamePage(Address address1, Address address2) {
795     return Page::FromAddress(address1) == Page::FromAddress(address2);
796   }
797 
798   // Checks whether an address is page aligned.
799   static bool IsAlignedToPageSize(Address addr) {
800     return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
801   }
802 
803   static bool IsAtObjectStart(Address addr) {
804     return (addr & kPageAlignmentMask) == kObjectStartOffset;
805   }
806 
807   static Page* ConvertNewToOld(Page* old_page);
808 
809   inline void MarkNeverAllocateForTesting();
810   inline void MarkEvacuationCandidate();
811   inline void ClearEvacuationCandidate();
812 
813   Page* next_page() { return static_cast<Page*>(list_node_.next()); }
814   Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }
815 
816   template <typename Callback>
817   inline void ForAllFreeListCategories(Callback callback) {
818     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
819       callback(categories_[i]);
820     }
821   }
822 
823   // Returns the offset of a given address to this page.
824   inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
825 
826   // Returns the address for a given offset in this page.
827   Address OffsetToAddress(size_t offset) {
828     DCHECK_PAGE_OFFSET(offset);
829     return address() + offset;
830   }
831 
832   // WaitUntilSweepingCompleted only works when concurrent sweeping is in
833   // progress. In particular, when we know that right before this call a
834   // sweeper thread was sweeping this page.
835   void WaitUntilSweepingCompleted() {
836     mutex_->Lock();
837     mutex_->Unlock();
838     DCHECK(SweepingDone());
839   }
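
  // The lock/unlock pair above relies on the sweeper thread holding the page
  // mutex while it sweeps this page, so acquiring the mutex here blocks until
  // sweeping has finished.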
840 
841   void AllocateLocalTracker();
842   inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
843   bool contains_array_buffers();
844 
845   void ResetFreeListStatistics();
846 
847   size_t AvailableInFreeList();
848 
849   size_t AvailableInFreeListFromAllocatedBytes() {
850     DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
851     return area_size() - wasted_memory() - allocated_bytes();
852   }
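
  // That is, the free-list portion of a page is whatever remains of the
  // object area after subtracting the allocated bytes and the wasted bytes
  // that were never added to the free list.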
853 
854   FreeListCategory* free_list_category(FreeListCategoryType type) {
855     return categories_[type];
856   }
857 
858   size_t wasted_memory() { return wasted_memory_; }
859   void add_wasted_memory(size_t waste) { wasted_memory_ += waste; }
860   size_t allocated_bytes() { return allocated_bytes_; }
861   void IncreaseAllocatedBytes(size_t bytes) {
862     DCHECK_LE(bytes, area_size());
863     allocated_bytes_ += bytes;
864   }
865   void DecreaseAllocatedBytes(size_t bytes) {
866     DCHECK_LE(bytes, area_size());
867     DCHECK_GE(allocated_bytes(), bytes);
868     allocated_bytes_ -= bytes;
869   }
870 
871   void ResetAllocatedBytes();
872 
873   size_t ShrinkToHighWaterMark();
874 
875   V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
876   void DestroyBlackArea(Address start, Address end);
877 
878   void InitializeFreeListCategories();
879   void AllocateFreeListCategories();
880   void ReleaseFreeListCategories();
881 
882 #ifdef DEBUG
883   void Print();
884 #endif  // DEBUG
885 
886  private:
887   enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
888 
889   friend class MemoryAllocator;
890 };
891 
892 class ReadOnlyPage : public Page {
893  public:
894   // Clears any pointers in the header that point out of the page that would
895   // otherwise make the header non-relocatable.
896   void MakeHeaderRelocatable();
897 };
898 
899 class LargePage : public MemoryChunk {
900  public:
901   HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
902 
903   inline LargePage* next_page() {
904     return static_cast<LargePage*>(list_node_.next());
905   }
906 
907   // Uncommit memory that is not in use anymore by the object. If the object
908   // cannot be shrunk, 0 is returned.
909   Address GetAddressToShrink(Address object_address, size_t object_size);
910 
911   void ClearOutOfLiveRangeSlots(Address free_start);
912 
913   // A limit to guarantee that we do not overflow typed slot offset in
914   // the old to old remembered set.
915   // Note that this limit is higher than what assembler already imposes on
916   // x64 and ia32 architectures.
917   static const int kMaxCodePageSize = 512 * MB;
918 
919  private:
920   static LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
921                                Executability executable);
922 
923   friend class MemoryAllocator;
924 };
925 
926 
927 // ----------------------------------------------------------------------------
928 // Space is the abstract superclass for all allocation spaces.
929 class Space : public Malloced {
930  public:
931   Space(Heap* heap, AllocationSpace id)
932       : allocation_observers_paused_(false),
933         heap_(heap),
934         id_(id),
935         committed_(0),
936         max_committed_(0) {
937     external_backing_store_bytes_ =
938         new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
939     external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
940     external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
941         0;
942   }
943 
944   virtual ~Space() {
945     delete[] external_backing_store_bytes_;
946     external_backing_store_bytes_ = nullptr;
947   }
948 
949   Heap* heap() const { return heap_; }
950 
951   // Identity used in error reporting.
952   AllocationSpace identity() { return id_; }
953 
954   const char* name() { return AllocationSpaceName(id_); }
955 
956   V8_EXPORT_PRIVATE virtual void AddAllocationObserver(
957       AllocationObserver* observer);
958 
959   V8_EXPORT_PRIVATE virtual void RemoveAllocationObserver(
960       AllocationObserver* observer);
961 
962   V8_EXPORT_PRIVATE virtual void PauseAllocationObservers();
963 
964   V8_EXPORT_PRIVATE virtual void ResumeAllocationObservers();
965 
966   V8_EXPORT_PRIVATE virtual void StartNextInlineAllocationStep() {}
967 
968   void AllocationStep(int bytes_since_last, Address soon_object, int size);
969 
970   // Return the total amount of committed memory for this space, i.e., allocatable
971   // memory and page headers.
972   virtual size_t CommittedMemory() { return committed_; }
973 
974   virtual size_t MaximumCommittedMemory() { return max_committed_; }
975 
976   // Returns allocated size.
977   virtual size_t Size() = 0;
978 
979   // Returns size of objects. Can differ from the allocated size
980   // (e.g. see LargeObjectSpace).
981   virtual size_t SizeOfObjects() { return Size(); }
982 
983   // Returns amount of off-heap memory in-use by objects in this Space.
984   virtual size_t ExternalBackingStoreBytes(
985       ExternalBackingStoreType type) const {
986     return external_backing_store_bytes_[type];
987   }
988 
989   // Approximate amount of physical memory committed for this space.
990   virtual size_t CommittedPhysicalMemory() = 0;
991 
992   // Return the available bytes without growing.
993   virtual size_t Available() = 0;
994 
995   virtual int RoundSizeDownToObjectAlignment(int size) {
996     if (id_ == CODE_SPACE) {
997       return RoundDown(size, kCodeAlignment);
998     } else {
999       return RoundDown(size, kPointerSize);
1000     }
1001   }
1002 
1003   virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
1004 
1005   void AccountCommitted(size_t bytes) {
1006     DCHECK_GE(committed_ + bytes, committed_);
1007     committed_ += bytes;
1008     if (committed_ > max_committed_) {
1009       max_committed_ = committed_;
1010     }
1011   }
1012 
1013   void AccountUncommitted(size_t bytes) {
1014     DCHECK_GE(committed_, committed_ - bytes);
1015     committed_ -= bytes;
1016   }
1017 
1018   void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1019                                           size_t amount) {
1020     external_backing_store_bytes_[type] += amount;
1021   }
1022   void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
1023                                           size_t amount) {
1024     DCHECK_GE(external_backing_store_bytes_[type], amount);
1025     external_backing_store_bytes_[type] -= amount;
1026   }
1027 
1028   V8_EXPORT_PRIVATE void* GetRandomMmapAddr();
1029 
1030   MemoryChunk* first_page() { return memory_chunk_list_.front(); }
1031   MemoryChunk* last_page() { return memory_chunk_list_.back(); }
1032 
1033   base::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }
1034 
1035 #ifdef DEBUG
1036   virtual void Print() = 0;
1037 #endif
1038 
1039  protected:
1040   intptr_t GetNextInlineAllocationStepSize();
1041   bool AllocationObserversActive() {
1042     return !allocation_observers_paused_ && !allocation_observers_.empty();
1043   }
1044 
1045   std::vector<AllocationObserver*> allocation_observers_;
1046 
1047   // The List manages the pages that belong to the given space.
1048   base::List<MemoryChunk> memory_chunk_list_;
1049 
1050   // Tracks off-heap memory used by this space.
1051   std::atomic<size_t>* external_backing_store_bytes_;
1052 
1053  private:
1054   bool allocation_observers_paused_;
1055   Heap* heap_;
1056   AllocationSpace id_;
1057 
1058   // Keeps track of committed memory in a space.
1059   size_t committed_;
1060   size_t max_committed_;
1061 
1062   DISALLOW_COPY_AND_ASSIGN(Space);
1063 };
1064 
1065 
1066 class MemoryChunkValidator {
1067   // Computed offsets should match the compiler generated ones.
1068   STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
1069 
1070   // Validate our estimates on the header size.
1071   STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
1072   STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
1073   STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
1074 };
1075 
1076 
1077 // ----------------------------------------------------------------------------
1078 // All heap objects containing executable code (code objects) must be allocated
1079 // from a 2 GB range of memory, so that they can call each other using 32-bit
1080 // displacements.  This happens automatically on 32-bit platforms, where 32-bit
1081 // displacements cover the entire 4GB virtual address space.  On 64-bit
1082 // platforms, we support this using the CodeRange object, which reserves and
1083 // manages a range of virtual memory.
1084 class CodeRange {
1085  public:
1086   CodeRange(Isolate* isolate, size_t requested_size);
1087   ~CodeRange();
1088 
1089   bool valid() { return virtual_memory_.IsReserved(); }
1090   Address start() {
1091     DCHECK(valid());
1092     return virtual_memory_.address();
1093   }
1094   size_t size() {
1095     DCHECK(valid());
1096     return virtual_memory_.size();
1097   }
1098   bool contains(Address address) {
1099     if (!valid()) return false;
1100     Address start = virtual_memory_.address();
1101     return start <= address && address < start + virtual_memory_.size();
1102   }
1103 
1104   // Allocates a chunk of memory from the large-object portion of
1105   // the code range.  On platforms with no separate code range, should
1106   // not be called.
1107   V8_WARN_UNUSED_RESULT Address AllocateRawMemory(const size_t requested_size,
1108                                                   const size_t commit_size,
1109                                                   size_t* allocated);
1110   void FreeRawMemory(Address buf, size_t length);
1111 
1112  private:
1113   class FreeBlock {
1114    public:
1115     FreeBlock() : start(0), size(0) {}
1116     FreeBlock(Address start_arg, size_t size_arg)
1117         : start(start_arg), size(size_arg) {
1118       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1119       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1120     }
1121     FreeBlock(void* start_arg, size_t size_arg)
1122         : start(reinterpret_cast<Address>(start_arg)), size(size_arg) {
1123       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
1124       DCHECK(size >= static_cast<size_t>(Page::kPageSize));
1125     }
1126 
1127     Address start;
1128     size_t size;
1129   };
1130 
1131   // Finds a block on the allocation list that contains at least the
1132   // requested amount of memory.  If none is found, sorts and merges
1133   // the existing free memory blocks, and searches again.
1134   // If none can be found, returns false.
1135   bool GetNextAllocationBlock(size_t requested);
1136   // Compares the start addresses of two free blocks.
1137   static bool CompareFreeBlockAddress(const FreeBlock& left,
1138                                       const FreeBlock& right);
1139   bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1140   void ReleaseBlock(const FreeBlock* block);
1141 
1142   Isolate* isolate_;
1143 
1144   // The reserved range of virtual memory that all code objects are put in.
1145   VirtualMemory virtual_memory_;
1146 
1147   // The global mutex guards free_list_ and allocation_list_ as GC threads may
1148   // access both lists concurrently with the main thread.
1149   base::Mutex code_range_mutex_;
1150 
1151   // Freed blocks of memory are added to the free list.  When the allocation
1152   // list is exhausted, the free list is sorted and merged to make the new
1153   // allocation list.
1154   std::vector<FreeBlock> free_list_;
1155 
1156   // Memory is allocated from the free blocks on the allocation list.
1157   // The block at current_allocation_block_index_ is the current block.
1158   std::vector<FreeBlock> allocation_list_;
1159   size_t current_allocation_block_index_;
1160   size_t requested_code_range_size_;
1161 
1162   DISALLOW_COPY_AND_ASSIGN(CodeRange);
1163 };
1164 
1165 // The process-wide singleton that keeps track of code range regions with the
1166 // intention to reuse free code range regions as a workaround for CFG memory
1167 // leaks (see crbug.com/870054).
1168 class CodeRangeAddressHint {
1169  public:
1170   // Returns the most recently freed code range start address for the given
1171   // size. If there is no such entry, then a random address is returned.
1172   V8_EXPORT_PRIVATE void* GetAddressHint(size_t code_range_size);
1173 
1174   V8_EXPORT_PRIVATE void NotifyFreedCodeRange(void* code_range_start,
1175                                               size_t code_range_size);
1176 
1177  private:
1178   base::Mutex mutex_;
1179   // A map from code range size to an array of recently freed code range
1180   // addresses. There should be O(1) different code range sizes.
1181   // The length of each array is limited by the peak number of code ranges,
1182   // which should be also O(1).
1183   std::map<size_t, std::vector<void*>> recently_freed_;
1184 };
1185 
1186 class SkipList {
1187  public:
1188   SkipList() { Clear(); }
1189 
1190   void Clear() {
1191     for (int idx = 0; idx < kSize; idx++) {
1192       starts_[idx] = static_cast<Address>(-1);
1193     }
1194   }
1195 
1196   Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1197 
1198   void AddObject(Address addr, int size) {
1199     int start_region = RegionNumber(addr);
1200     int end_region = RegionNumber(addr + size - kPointerSize);
1201     for (int idx = start_region; idx <= end_region; idx++) {
1202       if (starts_[idx] > addr) {
1203         starts_[idx] = addr;
1204       } else {
1205         // In the first region, there may already be an object closer to the
1206         // start of the region. Do not change the start in that case. If this
1207         // is not the first region, you probably added overlapping objects.
1208         DCHECK_EQ(start_region, idx);
1209       }
1210     }
1211   }
1212 
1213   static inline int RegionNumber(Address addr) {
1214     return (OffsetFrom(addr) & kPageAlignmentMask) >> kRegionSizeLog2;
1215   }
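
  // With kRegionSizeLog2 == 13 each region covers 8 KB, so the 512 KB pages
  // described above are split into 64 region slots. AddObject records, for
  // every region an object overlaps, the lowest start address of such an
  // object; StartFor(addr) therefore returns a conservative address at or
  // before the object containing |addr| from which a scan can safely start.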
1216 
1217   static void Update(Address addr, int size) {
1218     Page* page = Page::FromAddress(addr);
1219     SkipList* list = page->skip_list();
1220     if (list == nullptr) {
1221       list = new SkipList();
1222       page->set_skip_list(list);
1223     }
1224 
1225     list->AddObject(addr, size);
1226   }
1227 
1228  private:
1229   static const int kRegionSizeLog2 = 13;
1230   static const int kRegionSize = 1 << kRegionSizeLog2;
1231   static const int kSize = Page::kPageSize / kRegionSize;
1232 
1233   STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1234 
1235   Address starts_[kSize];
1236 };
1237 
1238 
1239 // ----------------------------------------------------------------------------
1240 // A space acquires chunks of memory from the operating system. The memory
1241 // allocator allocates and deallocates pages for the paged heap spaces and large
1242 // pages for large object space.
1243 class V8_EXPORT_PRIVATE MemoryAllocator {
1244  public:
1245   // Unmapper takes care of concurrently unmapping and uncommitting memory
1246   // chunks.
1247   class Unmapper {
1248    public:
1249     class UnmapFreeMemoryTask;
1250 
1251     Unmapper(Heap* heap, MemoryAllocator* allocator)
1252         : heap_(heap),
1253           allocator_(allocator),
1254           pending_unmapping_tasks_semaphore_(0),
1255           pending_unmapping_tasks_(0),
1256           active_unmapping_tasks_(0) {
1257       chunks_[kRegular].reserve(kReservedQueueingSlots);
1258       chunks_[kPooled].reserve(kReservedQueueingSlots);
1259     }
1260 
1261     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1262       if (chunk->IsPagedSpace() && chunk->executable() != EXECUTABLE) {
1263         AddMemoryChunkSafe<kRegular>(chunk);
1264       } else {
1265         AddMemoryChunkSafe<kNonRegular>(chunk);
1266       }
1267     }
1268 
1269     MemoryChunk* TryGetPooledMemoryChunkSafe() {
1270       // Procedure:
1271       // (1) Try to get a chunk that was declared as pooled and already has
1272       // been uncommitted.
1273       // (2) Try to steal any memory chunk of kPageSize that would've been
1274       // unmapped.
1275       MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1276       if (chunk == nullptr) {
1277         chunk = GetMemoryChunkSafe<kRegular>();
1278         if (chunk != nullptr) {
1279           // For stolen chunks we need to manually free any allocated memory.
1280           chunk->ReleaseAllocatedMemory();
1281         }
1282       }
1283       return chunk;
1284     }
1285 
1286     void FreeQueuedChunks();
1287     void CancelAndWaitForPendingTasks();
1288     void PrepareForMarkCompact();
1289     void EnsureUnmappingCompleted();
1290     void TearDown();
1291     int NumberOfChunks();
1292     size_t CommittedBufferedMemory();
1293 
1294    private:
1295     static const int kReservedQueueingSlots = 64;
1296     static const int kMaxUnmapperTasks = 4;
1297 
1298     enum ChunkQueueType {
1299       kRegular,     // Pages of kPageSize that do not live in a CodeRange and
1300                     // can thus be used for stealing.
1301       kNonRegular,  // Large chunks and executable chunks.
1302       kPooled,      // Pooled chunks, already uncommitted and ready for reuse.
1303       kNumberOfChunkQueues,
1304     };
1305 
1306     enum class FreeMode {
1307       kUncommitPooled,
1308       kReleasePooled,
1309     };
1310 
1311     template <ChunkQueueType type>
1312     void AddMemoryChunkSafe(MemoryChunk* chunk) {
1313       base::LockGuard<base::Mutex> guard(&mutex_);
1314       chunks_[type].push_back(chunk);
1315     }
1316 
1317     template <ChunkQueueType type>
1318     MemoryChunk* GetMemoryChunkSafe() {
1319       base::LockGuard<base::Mutex> guard(&mutex_);
1320       if (chunks_[type].empty()) return nullptr;
1321       MemoryChunk* chunk = chunks_[type].back();
1322       chunks_[type].pop_back();
1323       return chunk;
1324     }
1325 
1326     bool MakeRoomForNewTasks();
1327 
1328     template <FreeMode mode>
1329     void PerformFreeMemoryOnQueuedChunks();
1330 
1331     void PerformFreeMemoryOnQueuedNonRegularChunks();
1332 
1333     Heap* const heap_;
1334     MemoryAllocator* const allocator_;
1335     base::Mutex mutex_;
1336     std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1337     CancelableTaskManager::Id task_ids_[kMaxUnmapperTasks];
1338     base::Semaphore pending_unmapping_tasks_semaphore_;
1339     intptr_t pending_unmapping_tasks_;
1340     std::atomic<intptr_t> active_unmapping_tasks_;
1341 
1342     friend class MemoryAllocator;
1343   };
1344 
1345   enum AllocationMode {
1346     kRegular,
1347     kPooled,
1348   };
1349 
1350   enum FreeMode {
1351     kFull,
1352     kAlreadyPooled,
1353     kPreFreeAndQueue,
1354     kPooledAndQueue,
1355   };
1356 
1357   static size_t CodePageGuardStartOffset();
1358 
1359   static size_t CodePageGuardSize();
1360 
1361   static size_t CodePageAreaStartOffset();
1362 
1363   static size_t CodePageAreaEndOffset();
1364 
1365   static size_t CodePageAreaSize() {
1366     return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1367   }
1368 
1369   static size_t PageAreaSize(AllocationSpace space) {
1370     DCHECK_NE(LO_SPACE, space);
1371     return (space == CODE_SPACE) ? CodePageAreaSize()
1372                                  : Page::kAllocatableMemory;
1373   }
1374 
1375   static intptr_t GetCommitPageSize();
1376 
1377   MemoryAllocator(Isolate* isolate, size_t max_capacity,
1378                   size_t code_range_size);
1379 
1380   void TearDown();
1381 
1382   // Allocates a Page from the allocator. AllocationMode is used to indicate
1383   // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1384   // should be tried first.
1385   template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1386             typename SpaceType>
1387   Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1388 
1389   LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1390                                Executability executable);
1391 
1392   template <MemoryAllocator::FreeMode mode = kFull>
1393   void Free(MemoryChunk* chunk);
1394 
1395   // Returns allocated spaces in bytes.
1396   size_t Size() { return size_; }
1397 
1398   // Returns allocated executable spaces in bytes.
1399   size_t SizeExecutable() { return size_executable_; }
1400 
1401   // Returns the maximum available bytes of heaps.
1402   size_t Available() {
1403     const size_t size = Size();
1404     return capacity_ < size ? 0 : capacity_ - size;
1405   }
1406 
1407   // Returns maximum available bytes that the old space can have.
1408   size_t MaxAvailable() {
1409     return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1410   }
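  // Worked example (numbers are illustrative): if Available() reports
  // 10 * Page::kPageSize bytes, only ten whole pages fit, so MaxAvailable()
  // reports 10 * Page::kAllocatableMemory bytes, i.e. the per-page overhead
  // is excluded.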
1411 
1412   // Returns an indication of whether a pointer is in a space that has
1413   // been allocated by this MemoryAllocator.
1414   V8_INLINE bool IsOutsideAllocatedSpace(Address address) {
1415     return address < lowest_ever_allocated_ ||
1416            address >= highest_ever_allocated_;
1417   }
1418 
1419   // Returns a MemoryChunk in which the memory region from commit_area_size to
1420   // reserve_area_size of the chunk area is reserved but not committed; it
1421   // can be committed later by calling MemoryChunk::CommitArea.
1422   MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1423                              Executability executable, Space* space);
1424 
1425   Address ReserveAlignedMemory(size_t requested, size_t alignment, void* hint,
1426                                VirtualMemory* controller);
1427   Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1428                                 size_t alignment, Executability executable,
1429                                 void* hint, VirtualMemory* controller);
1430 
1431   bool CommitMemory(Address addr, size_t size);
1432 
1433   void FreeMemory(VirtualMemory* reservation, Executability executable);
1434   void FreeMemory(Address addr, size_t size, Executability executable);
1435 
1436   // Partially release |bytes_to_free| bytes starting at |start_free|. Note that
1437   // internally memory is freed from |start_free| to the end of the reservation.
1438   // Additional memory beyond the page is not accounted though, so
1439   // |bytes_to_free| is computed by the caller.
1440   void PartialFreeMemory(MemoryChunk* chunk, Address start_free,
1441                          size_t bytes_to_free, Address new_area_end);
1442 
1443   // Commit a contiguous block of memory from the initial chunk.  Assumes that
1444   // the address is not kNullAddress, the size is greater than zero, and that
1445   // the block is contained in the initial chunk.  Returns true if it succeeded
1446   // and false otherwise.
1447   bool CommitBlock(Address start, size_t size);
1448 
1449   // Checks if an allocated MemoryChunk was intended to be used for executable
1450   // memory.
1451   bool IsMemoryChunkExecutable(MemoryChunk* chunk) {
1452     return executable_memory_.find(chunk) != executable_memory_.end();
1453   }
1454 
1455   // Uncommit a contiguous block of memory [start..(start+size)[.
1456   // start is not kNullAddress, the size is greater than zero, and the
1457   // block is contained in the initial chunk.  Returns true if it succeeded
1458   // and false otherwise.
1459   bool UncommitBlock(Address start, size_t size);
1460 
1461   // Zaps a contiguous block of memory [start..(start+size)[ with
1462   // a given zap value.
1463   void ZapBlock(Address start, size_t size, uintptr_t zap_value);
1464 
1465   V8_WARN_UNUSED_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
1466                                                     Address start,
1467                                                     size_t commit_size,
1468                                                     size_t reserved_size);
1469 
1470   CodeRange* code_range() { return code_range_; }
1471   Unmapper* unmapper() { return &unmapper_; }
1472 
1473  private:
1474   // PreFree logically frees the object, i.e., it takes care of the size
1475   // bookkeeping and calls the allocation callback.
1476   void PreFreeMemory(MemoryChunk* chunk);
1477 
1478   // FreeMemory can be called concurrently when PreFree was executed before.
1479   void PerformFreeMemory(MemoryChunk* chunk);
1480 
1481   // See AllocatePage for public interface. Note that currently we only support
1482   // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1483   template <typename SpaceType>
1484   MemoryChunk* AllocatePagePooled(SpaceType* owner);
1485 
1486   // Initializes pages in a chunk. Returns the first page address.
1487   // This function and GetChunkId() are provided for the mark-compact
1488   // collector to rebuild page headers in the from space, which is
1489   // used as a marking stack and whose page headers are destroyed.
1490   Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1491                                PagedSpace* owner);
1492 
1493   void UpdateAllocatedSpaceLimits(Address low, Address high) {
1494     // The use of atomic primitives does not guarantee correctness (wrt.
1495     // desired semantics) by default. The loop here ensures that we update the
1496     // values only if they did not change in between.
1497     Address ptr = kNullAddress;
1498     do {
1499       ptr = lowest_ever_allocated_;
1500     } while ((low < ptr) &&
1501              !lowest_ever_allocated_.compare_exchange_weak(ptr, low));
1502     do {
1503       ptr = highest_ever_allocated_;
1504     } while ((high > ptr) &&
1505              !highest_ever_allocated_.compare_exchange_weak(ptr, high));
1506   }
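  // The loops above are the usual lock-free "atomic minimum/maximum" idiom.
  // A minimal standalone sketch of the same pattern (illustrative only, not
  // part of this class):
  //
  //   void UpdateMin(std::atomic<Address>* min, Address candidate) {
  //     Address current = *min;
  //     while (candidate < current &&
  //            !min->compare_exchange_weak(current, candidate)) {
  //       // On failure |current| is refreshed with the observed value.
  //     }
  //   }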
1507 
1508   void RegisterExecutableMemoryChunk(MemoryChunk* chunk) {
1509     DCHECK(chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE));
1510     DCHECK_EQ(executable_memory_.find(chunk), executable_memory_.end());
1511     executable_memory_.insert(chunk);
1512   }
1513 
1514   void UnregisterExecutableMemoryChunk(MemoryChunk* chunk) {
1515     DCHECK_NE(executable_memory_.find(chunk), executable_memory_.end());
1516     executable_memory_.erase(chunk);
1517     chunk->heap()->UnregisterUnprotectedMemoryChunk(chunk);
1518   }
1519 
1520   Isolate* isolate_;
1521   CodeRange* code_range_;
1522 
1523   // Maximum space size in bytes.
1524   size_t capacity_;
1525 
1526   // Allocated space size in bytes.
1527   std::atomic<size_t> size_;
1528   // Allocated executable space size in bytes.
1529   std::atomic<size_t> size_executable_;
1530 
1531   // We keep the lowest and highest addresses allocated as a quick way
1532   // of determining that pointers are outside the heap. The estimate is
1533   // conservative, i.e. not all addresses in 'allocated' space are allocated
1534   // to our heap. The range is [lowest, highest[, inclusive on the low end
1535   // and exclusive on the high end.
1536   std::atomic<Address> lowest_ever_allocated_;
1537   std::atomic<Address> highest_ever_allocated_;
1538 
1539   VirtualMemory last_chunk_;
1540   Unmapper unmapper_;
1541 
1542   // Data structure to remember allocated executable memory chunks.
1543   std::unordered_set<MemoryChunk*> executable_memory_;
1544 
1545   friend class heap::TestCodeRangeScope;
1546 
1547   DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1548 };
1549 
1550 extern template Page*
1551 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1552     size_t size, PagedSpace* owner, Executability executable);
1553 extern template Page*
1554 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1555     size_t size, SemiSpace* owner, Executability executable);
1556 extern template Page*
1557 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1558     size_t size, SemiSpace* owner, Executability executable);
1559 
1560 // -----------------------------------------------------------------------------
1561 // Interface for heap object iterator to be implemented by all object space
1562 // object iterators.
1563 //
1564 // NOTE: The space specific object iterators also implement their own next()
1565 //       method, which is used to avoid virtual function calls when
1566 //       iterating a specific space.
1567 
1568 class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1569  public:
1570   virtual ~ObjectIterator() {}
1571   virtual HeapObject* Next() = 0;
1572 };
1573 
1574 template <class PAGE_TYPE>
1575 class PageIteratorImpl
1576     : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1577  public:
1578   explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1579   PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1580   PAGE_TYPE* operator*() { return p_; }
1581   bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1582     return rhs.p_ == p_;
1583   }
1584   bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1585     return rhs.p_ != p_;
1586   }
1587   inline PageIteratorImpl<PAGE_TYPE>& operator++();
1588   inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1589 
1590  private:
1591   PAGE_TYPE* p_;
1592 };
1593 
1594 typedef PageIteratorImpl<Page> PageIterator;
1595 typedef PageIteratorImpl<LargePage> LargePageIterator;
1596 
1597 class PageRange {
1598  public:
1599   typedef PageIterator iterator;
1600   PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1601   explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1602   inline PageRange(Address start, Address limit);
1603 
1604   iterator begin() { return iterator(begin_); }
1605   iterator end() { return iterator(end_); }
1606 
1607  private:
1608   Page* begin_;
1609   Page* end_;
1610 };
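// Typical usage (illustrative; |space| stands for any object exposing
// first_page(), and nullptr is the end sentinel used by the page iterators):
//
//   for (Page* page : PageRange(space->first_page(), nullptr)) {
//     // ... visit |page| ...
//   }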
1611 
1612 // -----------------------------------------------------------------------------
1613 // Heap object iterator in new/old/map spaces.
1614 //
1615 // A HeapObjectIterator iterates objects from the bottom of the given space
1616 // to its top or from the bottom of the given page to its top.
1617 //
1618 // If objects are allocated in the page during iteration the iterator may
1619 // or may not iterate over those objects.  The caller must create a new
1620 // iterator in order to be sure to visit these new objects.
1621 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1622  public:
1623   // Creates a new object iterator in a given space.
1624   explicit HeapObjectIterator(PagedSpace* space);
1625   explicit HeapObjectIterator(Page* page);
1626 
1627   // Advance to the next object, skipping free spaces and other fillers and
1628   // skipping the special garbage section of which there is one per space.
1629   // Returns nullptr when the iteration has ended.
1630   inline HeapObject* Next() override;
1631 
1632  private:
1633   // Fast (inlined) path of next().
1634   inline HeapObject* FromCurrentPage();
1635 
1636   // Slow path of next(), goes into the next page.  Returns false if the
1637   // iteration has ended.
1638   bool AdvanceToNextPage();
1639 
1640   Address cur_addr_;  // Current iteration point.
1641   Address cur_end_;   // End iteration point.
1642   PagedSpace* space_;
1643   PageRange page_range_;
1644   PageRange::iterator current_page_;
1645 };
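// Typical usage (illustrative; |space| stands for any PagedSpace):
//
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... visit |obj| ...
//   }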
1646 
1647 
1648 // -----------------------------------------------------------------------------
1649 // A space has a circular list of pages. The next page can be accessed via
1650 // Page::next_page() call.
1651 
1652 // An abstraction of allocation and relocation pointers in a page-structured
1653 // space.
1654 class LinearAllocationArea {
1655  public:
1656   LinearAllocationArea() : top_(kNullAddress), limit_(kNullAddress) {}
1657   LinearAllocationArea(Address top, Address limit) : top_(top), limit_(limit) {}
1658 
1659   void Reset(Address top, Address limit) {
1660     set_top(top);
1661     set_limit(limit);
1662   }
1663 
1664   V8_INLINE void set_top(Address top) {
1665     SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
1666     top_ = top;
1667   }
1668 
1669   V8_INLINE Address top() const {
1670     SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
1671     return top_;
1672   }
1673 
1674   Address* top_address() { return &top_; }
1675 
1676   V8_INLINE void set_limit(Address limit) { limit_ = limit; }
1677 
1678   V8_INLINE Address limit() const { return limit_; }
1679 
1680   Address* limit_address() { return &limit_; }
1681 
1682 #ifdef DEBUG
1683   bool VerifyPagedAllocation() {
1684     return (Page::FromAllocationAreaAddress(top_) ==
1685             Page::FromAllocationAreaAddress(limit_)) &&
1686            (top_ <= limit_);
1687   }
1688 #endif
1689 
1690  private:
1691   // Current allocation top.
1692   Address top_;
1693   // Current allocation limit.
1694   Address limit_;
1695 };
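// Bump-pointer allocation against a LinearAllocationArea boils down to the
// following shape (a sketch; the real allocation paths live in the space
// classes below):
//
//   if (info.top() + size_in_bytes <= info.limit()) {
//     Address result = info.top();
//     info.set_top(result + size_in_bytes);
//     // |result| now addresses size_in_bytes of freshly claimed memory.
//   }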
1696 
1697 
1698 // An abstraction of the accounting statistics of a page-structured space.
1699 //
1700 // The stats are only set by functions that ensure they stay balanced. These
1701 // functions increase or decrease one of the non-capacity stats in conjunction
1702 // with capacity, or else they always balance increases and decreases to the
1703 // non-capacity stats.
1704 class AllocationStats BASE_EMBEDDED {
1705  public:
1706   AllocationStats() { Clear(); }
1707 
1708   // Zero out all the allocation statistics (i.e., no capacity).
1709   void Clear() {
1710     capacity_ = 0;
1711     max_capacity_ = 0;
1712     ClearSize();
1713   }
1714 
1715   void ClearSize() {
1716     size_ = 0;
1717 #ifdef DEBUG
1718     allocated_on_page_.clear();
1719 #endif
1720   }
1721 
1722   // Accessors for the allocation statistics.
1723   size_t Capacity() { return capacity_; }
1724   size_t MaxCapacity() { return max_capacity_; }
1725   size_t Size() { return size_; }
1726 #ifdef DEBUG
1727   size_t AllocatedOnPage(Page* page) { return allocated_on_page_[page]; }
1728 #endif
1729 
1730   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
1731     DCHECK_GE(size_ + bytes, size_);
1732     size_ += bytes;
1733 #ifdef DEBUG
1734     allocated_on_page_[page] += bytes;
1735 #endif
1736   }
1737 
1738   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
1739     DCHECK_GE(size_, bytes);
1740     size_ -= bytes;
1741 #ifdef DEBUG
1742     DCHECK_GE(allocated_on_page_[page], bytes);
1743     allocated_on_page_[page] -= bytes;
1744 #endif
1745   }
1746 
1747   void DecreaseCapacity(size_t bytes) {
1748     DCHECK_GE(capacity_, bytes);
1749     DCHECK_GE(capacity_ - bytes, size_);
1750     capacity_ -= bytes;
1751   }
1752 
1753   void IncreaseCapacity(size_t bytes) {
1754     DCHECK_GE(capacity_ + bytes, capacity_);
1755     capacity_ += bytes;
1756     if (capacity_ > max_capacity_) {
1757       max_capacity_ = capacity_;
1758     }
1759   }
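  // The accessors above are meant to be used in balanced pairs. For example,
  // when a space adopts a page (a sketch; the exact call sites live in the
  // space implementations):
  //
  //   stats.IncreaseCapacity(page_area_bytes);
  //   stats.IncreaseAllocatedBytes(live_bytes_on_page, page);
  //
  // with the mirrored Decrease* calls made when the page is released.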
1760 
1761  private:
1762   // |capacity_|: The number of object-area bytes (i.e., not including page
1763   // bookkeeping structures) currently in the space.
1764   // During evacuation capacity of the main spaces is accessed from multiple
1765   // threads to check the old generation hard limit.
1766   std::atomic<size_t> capacity_;
1767 
1768   // |max_capacity_|: The maximum capacity ever observed.
1769   size_t max_capacity_;
1770 
1771   // |size_|: The number of allocated bytes.
1772   size_t size_;
1773 
1774 #ifdef DEBUG
1775   std::unordered_map<Page*, size_t, Page::Hasher> allocated_on_page_;
1776 #endif
1777 };
1778 
1779 // A free list maintaining free blocks of memory. The free list is organized in
1780 // a way to encourage objects allocated around the same time to be near each
1781 // other. The normal way to allocate is intended to be by bumping a 'top'
1782 // pointer until it hits a 'limit' pointer.  When the limit is hit we need to
1783 // find a new space to allocate from. This is done with the free list, which is
1784 // divided up into rough categories to cut down on waste. Having finer
1785 // categories would scatter allocation more.
1786 
1787 // The free list is organized in categories as follows:
1788 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1789 //   allocation when categories >= small no longer have entries.
1790 // 11-31 words (tiny): The tiny blocks are only used for allocation when
1791 //   categories >= small no longer have entries.
1792 // 32-255 words (small): Used for allocating free space between 1-31 words in
1793 //   size.
1794 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1795 //   in size.
1796 // 2048-16383 words (large): Used for allocating free space between 256-2047
1797 //   words in size.
1798 // At least 16384 words (huge): This list is for objects of 2048 words or
1799 //   larger. Empty pages are also added to this list.
1800 class V8_EXPORT_PRIVATE FreeList {
1801  public:
1802   // This method returns how much memory can be allocated after freeing
1803   // maximum_freed memory.
1804   static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1805     if (maximum_freed <= kTiniestListMax) {
1806       // Since we are not iterating over all list entries, we cannot guarantee
1807       // that we can find the maximum freed block in that free list.
1808       return 0;
1809     } else if (maximum_freed <= kTinyListMax) {
1810       return kTinyAllocationMax;
1811     } else if (maximum_freed <= kSmallListMax) {
1812       return kSmallAllocationMax;
1813     } else if (maximum_freed <= kMediumListMax) {
1814       return kMediumAllocationMax;
1815     } else if (maximum_freed <= kLargeListMax) {
1816       return kLargeAllocationMax;
1817     }
1818     return maximum_freed;
1819   }
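  // Worked example (illustrative): if the largest freed block is 100 words,
  // it falls into the small list (32-255 words), so only
  // kSmallAllocationMax == kTinyListMax bytes (31 words) can be guaranteed
  // for a later allocation.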
1820 
1821   static FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1822     if (size_in_bytes <= kTiniestListMax) {
1823       return kTiniest;
1824     } else if (size_in_bytes <= kTinyListMax) {
1825       return kTiny;
1826     } else if (size_in_bytes <= kSmallListMax) {
1827       return kSmall;
1828     } else if (size_in_bytes <= kMediumListMax) {
1829       return kMedium;
1830     } else if (size_in_bytes <= kLargeListMax) {
1831       return kLarge;
1832     }
1833     return kHuge;
1834   }
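  // Example mappings for a size of N words, i.e. N * kPointerSize bytes
  // (illustrative, derived from the k*ListMax constants below):
  //   N = 8     -> kTiniest
  //   N = 20    -> kTiny
  //   N = 100   -> kSmall
  //   N = 1000  -> kMedium
  //   N = 10000 -> kLarge
  //   N = 20000 -> kHuge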
1835 
1836   FreeList();
1837 
1838   // Adds a node on the free list. The block of size {size_in_bytes} starting
1839   // at {start} is placed on the free list. The return value is the number of
1840   // bytes that were not added to the free list, because the freed memory block
1841   // was too small. Bookkeeping information will be written to the block, i.e.,
1842   // its contents will be destroyed. The start address should be word aligned,
1843   // and the size should be a non-zero multiple of the word size.
1844   size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1845 
1846   // Allocates a free space node from the free list of at least size_in_bytes
1847   // bytes. Returns the actual node size in node_size which can be bigger than
1848   // size_in_bytes. This method returns null if the allocation request cannot be
1849   // handled by the free list.
1850   V8_WARN_UNUSED_RESULT FreeSpace* Allocate(size_t size_in_bytes,
1851                                             size_t* node_size);
1852 
1853   // Clear the free list.
1854   void Reset();
1855 
1856   void ResetStats() {
1857     wasted_bytes_ = 0;
1858     ForAllFreeListCategories(
1859         [](FreeListCategory* category) { category->ResetStats(); });
1860   }
1861 
1862   // Return the number of bytes available on the free list.
1863   size_t Available() {
1864     size_t available = 0;
1865     ForAllFreeListCategories([&available](FreeListCategory* category) {
1866       available += category->available();
1867     });
1868     return available;
1869   }
1870 
1871   bool IsEmpty() {
1872     bool empty = true;
1873     ForAllFreeListCategories([&empty](FreeListCategory* category) {
1874       if (!category->is_empty()) empty = false;
1875     });
1876     return empty;
1877   }
1878 
1879   // Used after booting the VM.
1880   void RepairLists(Heap* heap);
1881 
1882   size_t EvictFreeListItems(Page* page);
1883   bool ContainsPageFreeListItems(Page* page);
1884 
1885   size_t wasted_bytes() { return wasted_bytes_; }
1886 
1887   template <typename Callback>
1888   void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1889     FreeListCategory* current = categories_[type];
1890     while (current != nullptr) {
1891       FreeListCategory* next = current->next();
1892       callback(current);
1893       current = next;
1894     }
1895   }
1896 
1897   template <typename Callback>
1898   void ForAllFreeListCategories(Callback callback) {
1899     for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1900       ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1901     }
1902   }
1903 
1904   bool AddCategory(FreeListCategory* category);
1905   void RemoveCategory(FreeListCategory* category);
1906   void PrintCategories(FreeListCategoryType type);
1907 
1908   // Returns a page containing an entry for a given type, or nullptr otherwise.
1909   inline Page* GetPageForCategoryType(FreeListCategoryType type);
1910 
1911 #ifdef DEBUG
1912   size_t SumFreeLists();
1913   bool IsVeryLong();
1914 #endif
1915 
1916  private:
1917   class FreeListCategoryIterator {
1918    public:
1919     FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1920         : current_(free_list->categories_[type]) {}
1921 
1922     bool HasNext() { return current_ != nullptr; }
1923 
1924     FreeListCategory* Next() {
1925       DCHECK(HasNext());
1926       FreeListCategory* tmp = current_;
1927       current_ = current_->next();
1928       return tmp;
1929     }
1930 
1931    private:
1932     FreeListCategory* current_;
1933   };
1934 
1935   // The size range of blocks, in bytes.
1936   static const size_t kMinBlockSize = 3 * kPointerSize;
1937   static const size_t kMaxBlockSize = Page::kAllocatableMemory;
1938 
1939   static const size_t kTiniestListMax = 0xa * kPointerSize;
1940   static const size_t kTinyListMax = 0x1f * kPointerSize;
1941   static const size_t kSmallListMax = 0xff * kPointerSize;
1942   static const size_t kMediumListMax = 0x7ff * kPointerSize;
1943   static const size_t kLargeListMax = 0x3fff * kPointerSize;
1944   static const size_t kTinyAllocationMax = kTiniestListMax;
1945   static const size_t kSmallAllocationMax = kTinyListMax;
1946   static const size_t kMediumAllocationMax = kSmallListMax;
1947   static const size_t kLargeAllocationMax = kMediumListMax;
1948 
1949   // Walks all available categories for a given |type| and tries to retrieve
1950   // a node. Returns nullptr if the category is empty.
1951   FreeSpace* FindNodeIn(FreeListCategoryType type, size_t minimum_size,
1952                         size_t* node_size);
1953 
1954   // Tries to retrieve a node from the first category in a given |type|.
1955   // Returns nullptr if the category is empty or the top entry is smaller
1956   // than minimum_size.
1957   FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t minimum_size,
1958                            size_t* node_size);
1959 
1960   // Searches a given |type| for a node of at least |minimum_size|.
1961   FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1962                                  size_t minimum_size);
1963 
1964   // The tiny categories are not used for fast allocation.
1965   FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1966       size_t size_in_bytes) {
1967     if (size_in_bytes <= kSmallAllocationMax) {
1968       return kSmall;
1969     } else if (size_in_bytes <= kMediumAllocationMax) {
1970       return kMedium;
1971     } else if (size_in_bytes <= kLargeAllocationMax) {
1972       return kLarge;
1973     }
1974     return kHuge;
1975   }
1976 
1977   FreeListCategory* top(FreeListCategoryType type) const {
1978     return categories_[type];
1979   }
1980 
1981   std::atomic<size_t> wasted_bytes_;
1982   FreeListCategory* categories_[kNumberOfCategories];
1983 
1984   friend class FreeListCategory;
1985 };
1986 
1987 // LocalAllocationBuffer represents a linear allocation area that is created
1988 // from a given {AllocationResult} and can be used to allocate memory without
1989 // synchronization.
1990 //
1991 // The buffer is properly closed upon destruction and reassignment.
1992 // Example:
1993 //   {
1994 //     AllocationResult result = ...;
1995 //     LocalAllocationBuffer a(heap, result, size);
1996 //     LocalAllocationBuffer b = a;
1997 //     CHECK(!a.IsValid());
1998 //     CHECK(b.IsValid());
1999 //     // {a} is invalid now and cannot be used for further allocations.
2000 //   }
2001 //   // Since {b} went out of scope, the LAB is closed, resulting in creating a
2002 //   // filler object for the remaining area.
2003 class LocalAllocationBuffer {
2004  public:
2005   // Indicates that a buffer cannot be used for allocations anymore. Can result
2006   // from either reassigning a buffer, or trying to construct it from an
2007   // invalid {AllocationResult}.
2008   static LocalAllocationBuffer InvalidBuffer() {
2009     return LocalAllocationBuffer(
2010         nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
2011   }
2012 
2013   // Creates a new LAB from a given {AllocationResult}. Results in
2014   // InvalidBuffer if the result indicates a retry.
2015   static inline LocalAllocationBuffer FromResult(Heap* heap,
2016                                                  AllocationResult result,
2017                                                  intptr_t size);
2018 
2019   ~LocalAllocationBuffer() { Close(); }
2020 
2021   // Convert to C++11 move-semantics once allowed by the style guide.
2022   LocalAllocationBuffer(const LocalAllocationBuffer& other);
2023   LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
2024 
2025   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2026       int size_in_bytes, AllocationAlignment alignment);
2027 
2028   inline bool IsValid() { return allocation_info_.top() != kNullAddress; }
2029 
2030   // Try to merge LABs, which is only possible when they are adjacent in memory.
2031   // Returns true if the merge was successful, false otherwise.
2032   inline bool TryMerge(LocalAllocationBuffer* other);
2033 
2034   inline bool TryFreeLast(HeapObject* object, int object_size);
2035 
2036   // Close a LAB, effectively invalidating it. Returns the unused area.
2037   LinearAllocationArea Close();
2038 
2039  private:
2040   LocalAllocationBuffer(Heap* heap, LinearAllocationArea allocation_info);
2041 
2042   Heap* heap_;
2043   LinearAllocationArea allocation_info_;
2044 };
2045 
2046 class SpaceWithLinearArea : public Space {
2047  public:
2048   SpaceWithLinearArea(Heap* heap, AllocationSpace id)
2049       : Space(heap, id), top_on_previous_step_(0) {
2050     allocation_info_.Reset(kNullAddress, kNullAddress);
2051   }
2052 
2053   virtual bool SupportsInlineAllocation() = 0;
2054 
2055   // Returns the allocation pointer in this space.
2056   Address top() { return allocation_info_.top(); }
2057   Address limit() { return allocation_info_.limit(); }
2058 
2059   // The allocation top address.
2060   Address* allocation_top_address() { return allocation_info_.top_address(); }
2061 
2062   // The allocation limit address.
2063   Address* allocation_limit_address() {
2064     return allocation_info_.limit_address();
2065   }
2066 
2067   V8_EXPORT_PRIVATE void AddAllocationObserver(
2068       AllocationObserver* observer) override;
2069   V8_EXPORT_PRIVATE void RemoveAllocationObserver(
2070       AllocationObserver* observer) override;
2071   V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
2072   V8_EXPORT_PRIVATE void PauseAllocationObservers() override;
2073 
2074   // When allocation observers are active we may use a lower limit to allow the
2075   // observers to 'interrupt' earlier than the natural limit. Given a linear
2076   // area bounded by [start, end), this function computes the limit to use to
2077   // allow proper observation based on existing observers. min_size specifies
2078   // the minimum size that the limited area should have.
2079   Address ComputeLimit(Address start, Address end, size_t min_size);
2080   V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
2081       size_t min_size) = 0;
2082 
2083  protected:
2084   // If we are doing inline allocation in steps, this method performs the 'step'
2085   // operation. top is the memory address of the bump pointer at the last
2086   // inline allocation (i.e. it determines the number of bytes actually
2087   // allocated since the last step.) top_for_next_step is the address of the
2088   // bump pointer where the next byte is going to be allocated from. top and
2089   // top_for_next_step may be different when we cross a page boundary or reset
2090   // the space.
2091   // TODO(ofrobots): clarify the precise difference between this and
2092   // Space::AllocationStep.
2093   void InlineAllocationStep(Address top, Address top_for_next_step,
2094                             Address soon_object, size_t size);
2095   V8_EXPORT_PRIVATE void StartNextInlineAllocationStep() override;
2096 
2097   // TODO(ofrobots): make these private after refactoring is complete.
2098   LinearAllocationArea allocation_info_;
2099   Address top_on_previous_step_;
2100 };
2101 
2102 class V8_EXPORT_PRIVATE PagedSpace
2103     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
2104  public:
2105   typedef PageIterator iterator;
2106 
2107   static const size_t kCompactionMemoryWanted = 500 * KB;
2108 
2109   // Creates a space with an id.
2110   PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
2111 
2112   ~PagedSpace() override { TearDown(); }
2113 
2114   // Checks whether an object/address is in this space.
2115   inline bool Contains(Address a);
2116   inline bool Contains(Object* o);
2117   bool ContainsSlow(Address addr);
2118 
2119   // Does the space need executable memory?
2120   Executability executable() { return executable_; }
2121 
2122   // During boot the free_space_map is created, and afterwards we may need
2123   // to write it into the free list nodes that were already created.
2124   void RepairFreeListsAfterDeserialization();
2125 
2126   // Prepares for a mark-compact GC.
2127   void PrepareForMarkCompact();
2128 
2129   // Current capacity without growing (Size() + Available()).
2130   size_t Capacity() { return accounting_stats_.Capacity(); }
2131 
2132   // Approximate amount of physical memory committed for this space.
2133   size_t CommittedPhysicalMemory() override;
2134 
2135   void ResetFreeListStatistics();
2136 
2137   // Sets the capacity, the available space and the wasted space to zero.
2138   // The stats are rebuilt during sweeping by adding each page to the
2139   // capacity and the size when it is encountered.  As free spaces are
2140   // discovered during the sweeping they are subtracted from the size and added
2141   // to the available and wasted totals.
2142   void ClearStats() {
2143     accounting_stats_.ClearSize();
2144     free_list_.ResetStats();
2145     ResetFreeListStatistics();
2146   }
2147 
2148   // Available bytes without growing.  These are the bytes on the free list.
2149   // The bytes in the linear allocation area are not included in this total
2150   // because updating the stats would slow down allocation.  New pages are
2151   // immediately added to the free list so they show up here.
2152   size_t Available() override { return free_list_.Available(); }
2153 
2154   // Allocated bytes in this space.  Garbage bytes that were not found due to
2155   // concurrent sweeping are counted as being allocated!  The bytes in the
2156   // current linear allocation area (between top and limit) are also counted
2157   // here.
2158   size_t Size() override { return accounting_stats_.Size(); }
2159 
2160   // As size, but the bytes in lazily swept pages are estimated and the bytes
2161   // in the current linear allocation area are not included.
2162   size_t SizeOfObjects() override;
2163 
2164   // Wasted bytes in this space.  These are just the bytes that were thrown away
2165   // due to being too small to use for allocation.
2166   virtual size_t Waste() { return free_list_.wasted_bytes(); }
2167 
2168   enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
2169 
2170   // Allocate the requested number of bytes in the space if possible, return a
2171   // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
2172   // to be manually updated later.
2173   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
2174       int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2175 
2176   // Allocate the requested number of bytes in the space double aligned if
2177   // possible, return a failure object if not.
2178   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
2179       int size_in_bytes, AllocationAlignment alignment);
2180 
2181   // Allocate the requested number of bytes in the space and consider allocation
2182   // alignment if needed.
2183   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
2184       int size_in_bytes, AllocationAlignment alignment);
2185 
2186   size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
2187     if (size_in_bytes == 0) return 0;
2188     heap()->CreateFillerObjectAt(start, static_cast<int>(size_in_bytes),
2189                                  ClearRecordedSlots::kNo);
2190     if (mode == SpaceAccountingMode::kSpaceAccounted) {
2191       return AccountedFree(start, size_in_bytes);
2192     } else {
2193       return UnaccountedFree(start, size_in_bytes);
2194     }
2195   }
2196 
2197   // Give a block of memory to the space's free list.  It might be added to
2198   // the free list or accounted as waste.
2199   // If add_to_freelist is false then just accounting stats are updated and
2200   // no attempt to add area to free list is made.
2201   size_t AccountedFree(Address start, size_t size_in_bytes) {
2202     size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2203     Page* page = Page::FromAddress(start);
2204     accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
2205     DCHECK_GE(size_in_bytes, wasted);
2206     return size_in_bytes - wasted;
2207   }
2208 
2209   size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2210     size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2211     DCHECK_GE(size_in_bytes, wasted);
2212     return size_in_bytes - wasted;
2213   }
2214 
2215   inline bool TryFreeLast(HeapObject* object, int object_size);
2216 
2217   void ResetFreeList();
2218 
2219   // Empty space linear allocation area, returning unused area to free list.
2220   void FreeLinearAllocationArea();
2221 
2222   void MarkLinearAllocationAreaBlack();
2223   void UnmarkLinearAllocationArea();
2224 
2225   void DecreaseAllocatedBytes(size_t bytes, Page* page) {
2226     accounting_stats_.DecreaseAllocatedBytes(bytes, page);
2227   }
2228   void IncreaseAllocatedBytes(size_t bytes, Page* page) {
2229     accounting_stats_.IncreaseAllocatedBytes(bytes, page);
2230   }
2231   void DecreaseCapacity(size_t bytes) {
2232     accounting_stats_.DecreaseCapacity(bytes);
2233   }
2234   void IncreaseCapacity(size_t bytes) {
2235     accounting_stats_.IncreaseCapacity(bytes);
2236   }
2237 
2238   void RefineAllocatedBytesAfterSweeping(Page* page);
2239 
2240   Page* InitializePage(MemoryChunk* chunk, Executability executable);
2241 
2242   void ReleasePage(Page* page);
2243 
2244   // Adds the page to this space and returns the number of bytes added to the
2245   // free list of the space.
2246   size_t AddPage(Page* page);
2247   void RemovePage(Page* page);
2248   // Remove a page if it has at least |size_in_bytes| bytes available that can
2249   // be used for allocation.
2250   Page* RemovePageSafe(int size_in_bytes);
2251 
2252   void SetReadAndExecutable();
2253   void SetReadAndWritable();
2254 
2255 #ifdef VERIFY_HEAP
2256   // Verify integrity of this space.
2257   virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);
2258 
2259   void VerifyLiveBytes();
2260 
2261   // Overridden by subclasses to verify space-specific object
2262   // properties (e.g., only maps or free-list nodes are in map space).
2263   virtual void VerifyObject(HeapObject* obj) {}
2264 #endif
2265 
2266 #ifdef DEBUG
2267   void VerifyCountersAfterSweeping();
2268   void VerifyCountersBeforeConcurrentSweeping();
2269   // Print meta info and objects in this space.
2270   void Print() override;
2271 
2272   // Report code object related statistics
2273   static void ReportCodeStatistics(Isolate* isolate);
2274   static void ResetCodeStatistics(Isolate* isolate);
2275 #endif
2276 
2277   bool CanExpand(size_t size);
2278 
2279   // Returns the number of total pages in this space.
2280   int CountTotalPages();
2281 
2282   // Return size of allocatable area on a page in this space.
2283   inline int AreaSize() { return static_cast<int>(area_size_); }
2284 
2285   virtual bool is_local() { return false; }
2286 
2287   // Merges {other} into the current space. Note that this modifies {other},
2288   // e.g., removes its bump pointer area and resets statistics.
2289   void MergeCompactionSpace(CompactionSpace* other);
2290 
2291   // Refills the free list from the corresponding free list filled by the
2292   // sweeper.
2293   virtual void RefillFreeList();
2294 
2295   FreeList* free_list() { return &free_list_; }
2296 
2297   base::Mutex* mutex() { return &space_mutex_; }
2298 
2299   inline void UnlinkFreeListCategories(Page* page);
2300   inline size_t RelinkFreeListCategories(Page* page);
2301 
2302   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2303 
2304   iterator begin() { return iterator(first_page()); }
2305   iterator end() { return iterator(nullptr); }
2306 
2307   // Shrink immortal immovable pages of the space to be exactly the size needed
2308   // using the high water mark.
2309   void ShrinkImmortalImmovablePages();
2310 
2311   size_t ShrinkPageToHighWaterMark(Page* page);
2312 
2313   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2314 
2315   void SetLinearAllocationArea(Address top, Address limit);
2316 
2317  private:
2318   // Set space linear allocation area.
2319   void SetTopAndLimit(Address top, Address limit) {
2320     DCHECK(top == limit ||
2321            Page::FromAddress(top) == Page::FromAddress(limit - 1));
2322     MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2323     allocation_info_.Reset(top, limit);
2324   }
2325   void DecreaseLimit(Address new_limit);
2326   void UpdateInlineAllocationLimit(size_t min_size) override;
2327   bool SupportsInlineAllocation() override {
2328     return identity() == OLD_SPACE && !is_local();
2329   }
2330 
2331  protected:
2332   // PagedSpaces that should be included in snapshots have different, i.e.,
2333   // smaller, initial pages.
2334   virtual bool snapshotable() { return true; }
2335 
2336   bool HasPages() { return first_page() != nullptr; }
2337 
2338   // Cleans up the space, frees all pages in this space except those belonging
2339   // to the initial chunk, uncommits addresses in the initial chunk.
2340   void TearDown();
2341 
2342   // Expands the space by allocating a fixed number of pages. Returns false if
2343   // it cannot allocate the requested number of pages from the OS, or if the
2344   // hard heap size limit has been hit.
2345   bool Expand();
2346 
2347   // Sets up a linear allocation area that fits the given number of bytes.
2348   // Returns false if there is not enough space and the caller has to retry
2349   // after collecting garbage.
2350   inline bool EnsureLinearAllocationArea(int size_in_bytes);
2351   // Allocates an object from the linear allocation area. Assumes that the
2352   // linear allocation area is large enough to fit the object.
2353   inline HeapObject* AllocateLinearly(int size_in_bytes);
2354   // Tries to allocate an aligned object from the linear allocation area.
2355   // Returns nullptr if the linear allocation area does not fit the object.
2356   // Otherwise, returns the object pointer and writes the allocation size
2357   // (object size + alignment filler size) to the size_in_bytes.
2358   inline HeapObject* TryAllocateLinearlyAligned(int* size_in_bytes,
2359                                                 AllocationAlignment alignment);
2360 
2361   V8_WARN_UNUSED_RESULT bool RefillLinearAllocationAreaFromFreeList(
2362       size_t size_in_bytes);
2363 
2364   // If sweeping is still in progress try to sweep unswept pages. If that is
2365   // not successful, wait for the sweeper threads and retry free-list
2366   // allocation. Returns false if there is not enough space and the caller
2367   // has to retry after collecting garbage.
2368   V8_WARN_UNUSED_RESULT virtual bool SweepAndRetryAllocation(int size_in_bytes);
2369 
2370   // Slow path of AllocateRaw. This function is space-dependent. Returns false
2371   // if there is not enough space and the caller has to retry after
2372   // collecting garbage.
2373   V8_WARN_UNUSED_RESULT virtual bool SlowRefillLinearAllocationArea(
2374       int size_in_bytes);
2375 
2376   // Implementation of SlowAllocateRaw. Returns false if there is not enough
2377   // space and the caller has to retry after collecting garbage.
2378   V8_WARN_UNUSED_RESULT bool RawSlowRefillLinearAllocationArea(
2379       int size_in_bytes);
2380 
2381   Executability executable_;
2382 
2383   size_t area_size_;
2384 
2385   // Accounting information for this space.
2386   AllocationStats accounting_stats_;
2387 
2388   // The space's free list.
2389   FreeList free_list_;
2390 
2391   // Mutex guarding any concurrent access to the space.
2392   base::Mutex space_mutex_;
2393 
2394   friend class IncrementalMarking;
2395   friend class MarkCompactCollector;
2396 
2397   // Used in cctest.
2398   friend class heap::HeapTester;
2399 };
2400 
2401 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2402 
2403 // -----------------------------------------------------------------------------
2404 // SemiSpace in young generation
2405 //
2406 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2407 // The mark-compact collector uses the memory of the first page in the from
2408 // space as a marking stack when tracing live objects.
2409 class SemiSpace : public Space {
2410  public:
2411   typedef PageIterator iterator;
2412 
2413   static void Swap(SemiSpace* from, SemiSpace* to);
2414 
2415   SemiSpace(Heap* heap, SemiSpaceId semispace)
2416       : Space(heap, NEW_SPACE),
2417         current_capacity_(0),
2418         maximum_capacity_(0),
2419         minimum_capacity_(0),
2420         age_mark_(kNullAddress),
2421         committed_(false),
2422         id_(semispace),
2423         current_page_(nullptr),
2424         pages_used_(0) {}
2425 
2426   inline bool Contains(HeapObject* o);
2427   inline bool Contains(Object* o);
2428   inline bool ContainsSlow(Address a);
2429 
2430   void SetUp(size_t initial_capacity, size_t maximum_capacity);
2431   void TearDown();
2432 
2433   bool Commit();
2434   bool Uncommit();
2435   bool is_committed() { return committed_; }
2436 
2437   // Grow the semispace to the new capacity.  The new capacity requested must
2438   // be larger than the current capacity and less than the maximum capacity.
2439   bool GrowTo(size_t new_capacity);
2440 
2441   // Shrinks the semispace to the new capacity.  The new capacity requested
2442   // must be more than the amount of used memory in the semispace and less
2443   // than the current capacity.
2444   bool ShrinkTo(size_t new_capacity);
2445 
2446   bool EnsureCurrentCapacity();
2447 
2448   Address space_end() { return memory_chunk_list_.back()->area_end(); }
2449 
2450   // Returns the start address of the first page of the space.
2451   Address space_start() {
2452     DCHECK_NE(memory_chunk_list_.front(), nullptr);
2453     return memory_chunk_list_.front()->area_start();
2454   }
2455 
2456   Page* current_page() { return current_page_; }
2457   int pages_used() { return pages_used_; }
2458 
2459   // Returns the start address of the current page of the space.
2460   Address page_low() { return current_page_->area_start(); }
2461 
2462   // Returns one past the end address of the current page of the space.
2463   Address page_high() { return current_page_->area_end(); }
2464 
2465   bool AdvancePage() {
2466     Page* next_page = current_page_->next_page();
2467     // We cannot expand if we reached the maximum number of pages already. Note
2468     // that we need to account for the next page already for this check as we
2469     // could potentially fill the whole page after advancing.
2470     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2471     if (next_page == nullptr || reached_max_pages) {
2472       return false;
2473     }
2474     current_page_ = next_page;
2475     pages_used_++;
2476     return true;
2477   }
2478 
2479   // Resets the space to using the first page.
2480   void Reset();
2481 
2482   void RemovePage(Page* page);
2483   void PrependPage(Page* page);
2484 
2485   Page* InitializePage(MemoryChunk* chunk, Executability executable);
2486 
2487   // Age mark accessors.
2488   Address age_mark() { return age_mark_; }
2489   void set_age_mark(Address mark);
2490 
2491   // Returns the current capacity of the semispace.
2492   size_t current_capacity() { return current_capacity_; }
2493 
2494   // Returns the maximum capacity of the semispace.
2495   size_t maximum_capacity() { return maximum_capacity_; }
2496 
2497   // Returns the initial capacity of the semispace.
2498   size_t minimum_capacity() { return minimum_capacity_; }
2499 
2500   SemiSpaceId id() { return id_; }
2501 
2502   // Approximate amount of physical memory committed for this space.
2503   size_t CommittedPhysicalMemory() override;
2504 
2505   // If we don't have these here then SemiSpace will be abstract.  However
2506   // they should never be called:
2507 
2508   size_t Size() override {
2509     UNREACHABLE();
2510   }
2511 
2512   size_t SizeOfObjects() override { return Size(); }
2513 
2514   size_t Available() override {
2515     UNREACHABLE();
2516   }
2517 
2518   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
2519   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
2520 
2521   iterator begin() { return iterator(first_page()); }
2522   iterator end() { return iterator(nullptr); }
2523 
2524   std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2525 
2526 #ifdef DEBUG
2527   void Print() override;
2528   // Validate a range of addresses in a SemiSpace.
2529   // The "from" address must be on a page prior to the "to" address,
2530   // in the linked page order, or it must be earlier on the same page.
2531   static void AssertValidRange(Address from, Address to);
2532 #else
2533   // Do nothing.
2534   inline static void AssertValidRange(Address from, Address to) {}
2535 #endif
2536 
2537 #ifdef VERIFY_HEAP
2538   virtual void Verify();
2539 #endif
2540 
2541  private:
2542   void RewindPages(int num_pages);
2543 
2544   inline int max_pages() {
2545     return static_cast<int>(current_capacity_ / Page::kPageSize);
2546   }
2547 
2548   // Copies the flags into the masked positions on all pages in the space.
2549   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2550 
2551   // The currently committed space capacity.
2552   size_t current_capacity_;
2553 
2554   // The maximum capacity that can be used by this space. A space cannot grow
2555   // beyond that size.
2556   size_t maximum_capacity_;
2557 
2558   // The minimum capacity for the space. A space cannot shrink below this size.
2559   size_t minimum_capacity_;
2560 
2561   // Used to govern object promotion during mark-compact collection.
2562   Address age_mark_;
2563 
2564   bool committed_;
2565   SemiSpaceId id_;
2566 
2567   Page* current_page_;
2568 
2569   int pages_used_;
2570 
2571   friend class NewSpace;
2572   friend class SemiSpaceIterator;
2573 };
2574 
2575 
2576 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2577 // semispace of the heap's new space.  It iterates over the objects in the
2578 // semispace from a given start address (defaulting to the bottom of the
2579 // semispace) to the top of the semispace.  New objects allocated after the
2580 // iterator is created are not iterated.
2581 class SemiSpaceIterator : public ObjectIterator {
2582  public:
2583   // Create an iterator over the allocated objects in the given to-space.
2584   explicit SemiSpaceIterator(NewSpace* space);
2585 
2586   inline HeapObject* Next() override;
2587 
2588  private:
2589   void Initialize(Address start, Address end);
2590 
2591   // The current iteration point.
2592   Address current_;
2593   // The end of iteration.
2594   Address limit_;
2595 };
2596 
2597 // -----------------------------------------------------------------------------
2598 // The young generation space.
2599 //
2600 // The new space consists of a contiguous pair of semispaces.  It simply
2601 // forwards most functions to the appropriate semispace.
2602 
2603 class NewSpace : public SpaceWithLinearArea {
2604  public:
2605   typedef PageIterator iterator;
2606 
2607   NewSpace(Heap* heap, size_t initial_semispace_capacity,
2608            size_t max_semispace_capacity);
2609 
2610   ~NewSpace() override { TearDown(); }
2611 
2612   inline bool Contains(HeapObject* o);
2613   inline bool ContainsSlow(Address a);
2614   inline bool Contains(Object* o);
2615 
2616   // Tears down the space.  Heap memory was not allocated by the space, so it
2617   // is not deallocated here.
2618   void TearDown();
2619 
2620   // Flip the pair of spaces.
2621   void Flip();
2622 
2623   // Grow the capacity of the semispaces.  Assumes that they are not at
2624   // their maximum capacity.
2625   void Grow();
2626 
2627   // Shrink the capacity of the semispaces.
2628   void Shrink();
2629 
2630   // Return the allocated bytes in the active semispace.
2631   size_t Size() override {
2632     DCHECK_GE(top(), to_space_.page_low());
2633     return to_space_.pages_used() * Page::kAllocatableMemory +
2634            static_cast<size_t>(top() - to_space_.page_low());
2635   }
2636 
2637   size_t SizeOfObjects() override { return Size(); }
2638 
2639   // Return the allocatable capacity of a semispace.
2640   size_t Capacity() {
2641     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2642     return (to_space_.current_capacity() / Page::kPageSize) *
2643            Page::kAllocatableMemory;
2644   }
2645 
2646   // Return the current size of a semispace, allocatable and non-allocatable
2647   // memory.
2648   size_t TotalCapacity() {
2649     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2650     return to_space_.current_capacity();
2651   }
2652 
2653   // Committed memory for NewSpace is the committed memory of both semi-spaces
2654   // combined.
2655   size_t CommittedMemory() override {
2656     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2657   }
2658 
2659   size_t MaximumCommittedMemory() override {
2660     return from_space_.MaximumCommittedMemory() +
2661            to_space_.MaximumCommittedMemory();
2662   }
2663 
2664   // Approximate amount of physical memory committed for this space.
2665   size_t CommittedPhysicalMemory() override;
2666 
2667   // Return the available bytes without growing.
2668   size_t Available() override {
2669     DCHECK_GE(Capacity(), Size());
2670     return Capacity() - Size();
2671   }
2672 
2673   size_t ExternalBackingStoreBytes(
2674       ExternalBackingStoreType type) const override {
2675     DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
2676     return to_space_.ExternalBackingStoreBytes(type);
2677   }
2678 
2679   size_t AllocatedSinceLastGC() {
2680     const Address age_mark = to_space_.age_mark();
2681     DCHECK_NE(age_mark, kNullAddress);
2682     DCHECK_NE(top(), kNullAddress);
2683     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2684     Page* const last_page = Page::FromAllocationAreaAddress(top());
2685     Page* current_page = age_mark_page;
2686     size_t allocated = 0;
2687     if (current_page != last_page) {
2688       DCHECK_EQ(current_page, age_mark_page);
2689       DCHECK_GE(age_mark_page->area_end(), age_mark);
2690       allocated += age_mark_page->area_end() - age_mark;
2691       current_page = current_page->next_page();
2692     } else {
2693       DCHECK_GE(top(), age_mark);
2694       return top() - age_mark;
2695     }
2696     while (current_page != last_page) {
2697       DCHECK_NE(current_page, age_mark_page);
2698       allocated += Page::kAllocatableMemory;
2699       current_page = current_page->next_page();
2700     }
2701     DCHECK_GE(top(), current_page->area_start());
2702     allocated += top() - current_page->area_start();
2703     DCHECK_LE(allocated, Size());
2704     return allocated;
2705   }
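  // Worked example (illustrative): if the age mark sits in the middle of page
  // A, one full page B follows, and top() is on page C, the result is
  //   (A->area_end() - age_mark) + Page::kAllocatableMemory +
  //   (top() - C->area_start()).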
2706 
2707   void MovePageFromSpaceToSpace(Page* page) {
2708     DCHECK(page->InFromSpace());
2709     from_space_.RemovePage(page);
2710     to_space_.PrependPage(page);
2711   }
2712 
2713   bool Rebalance();
2714 
2715   // Return the maximum capacity of a semispace.
2716   size_t MaximumCapacity() {
2717     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2718     return to_space_.maximum_capacity();
2719   }
2720 
2721   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2722 
2723   // Returns the initial capacity of a semispace.
2724   size_t InitialTotalCapacity() {
2725     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2726     return to_space_.minimum_capacity();
2727   }
2728 
2729   void ResetOriginalTop() {
2730     DCHECK_GE(top(), original_top());
2731     DCHECK_LE(top(), original_limit());
2732     original_top_ = top();
2733   }
2734 
2735   Address original_top() { return original_top_; }
2736   Address original_limit() { return original_limit_; }
2737 
2738   // Return the first allocatable address in the active semispace. This may
2739   // be the address where the first object resides.
2740   Address first_allocatable_address() { return to_space_.space_start(); }
2741 
2742   // Get the age mark of the inactive semispace.
age_mark()2743   Address age_mark() { return from_space_.age_mark(); }
2744   // Set the age mark in the active semispace.
set_age_mark(Address mark)2745   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2746 
2747   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2748   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment);
2749 
2750   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2751   AllocateRawUnaligned(int size_in_bytes);
2752 
2753   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
2754   AllocateRaw(int size_in_bytes, AllocationAlignment alignment);
2755 
2756   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
2757       int size_in_bytes, AllocationAlignment alignment);
2758 
2759   // Reset the allocation pointer to the beginning of the active semispace.
2760   void ResetLinearAllocationArea();
2761 
2762   // When inline allocation stepping is active, either because of incremental
2763   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2764   // inline allocation every once in a while. This is done by setting
2765   // allocation_info_.limit to be lower than the actual limit and and increasing
2766   // it in steps to guarantee that the observers are notified periodically.
2767   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
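
  // Illustrative sketch (not part of this interface): if the nearest
  // allocation observer wants to be notified every `step` bytes, the limit
  // published to the inline allocation fast path is conceptually
  //
  //   limit = min(top() + step, <real end of the current allocation area>)
  //
  // so the fast path bails into the slow path roughly every `step` bytes,
  // the observers run, and the limit is then pushed further out again.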

  inline bool ToSpaceContainsSlow(Address a);
  inline bool FromSpaceContainsSlow(Address a);
  inline bool ToSpaceContains(Object* o);
  inline bool FromSpaceContains(Object* o);

  // Try to switch the active semispace to a new, empty page.
  // Returns false if this isn't possible or reasonable (i.e., there
  // are no pages, or the current page is already empty), or true
  // if successful.
  bool AddFreshPage();
  bool AddFreshPageSynchronized();

#ifdef VERIFY_HEAP
  // Verify the active semispace.
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  // Print the active semispace.
  void Print() override { to_space_.Print(); }
#endif

  // Return whether the operation succeeded.
  bool CommitFromSpaceIfNeeded() {
    if (from_space_.is_committed()) return true;
    return from_space_.Commit();
  }

  bool UncommitFromSpace() {
    if (!from_space_.is_committed()) return true;
    return from_space_.Uncommit();
  }

  bool IsFromSpaceCommitted() { return from_space_.is_committed(); }

  SemiSpace* active_space() { return &to_space_; }

  Page* first_page() { return to_space_.first_page(); }
  Page* last_page() { return to_space_.last_page(); }

  iterator begin() { return to_space_.begin(); }
  iterator end() { return to_space_.end(); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  SemiSpace& from_space() { return from_space_; }
  SemiSpace& to_space() { return to_space_; }

 private:
  // Update linear allocation area to match the current to-space page.
  void UpdateLinearAllocationArea();

  base::Mutex mutex_;

  // The top and the limit at the time of setting the linear allocation area.
  // These values can be accessed by background tasks.
  std::atomic<Address> original_top_;
  std::atomic<Address> original_limit_;

  // The semispaces.
  SemiSpace to_space_;
  SemiSpace from_space_;
  VirtualMemory reservation_;

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
  bool SupportsInlineAllocation() override { return true; }

  friend class SemiSpaceIterator;
};
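
// Allocation sketch (illustrative only; `new_space` and `size_in_bytes` are
// placeholders): a caller of the raw allocation interface above would look
// roughly like
//
//   AllocationResult result =
//       new_space->AllocateRaw(size_in_bytes, kWordAligned);
//   HeapObject* object = nullptr;
//   if (!result.To(&object)) {
//     // Allocation failed; retry after a scavenge or fall back to old space.
//   }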

class PauseAllocationObserversScope {
 public:
  explicit PauseAllocationObserversScope(Heap* heap);
  ~PauseAllocationObserversScope();

 private:
  Heap* heap_;
  DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
      : PagedSpace(heap, id, executable) {}

  bool is_local() override { return true; }

 protected:
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }

  V8_WARN_UNUSED_RESULT bool SweepAndRetryAllocation(
      int size_in_bytes) override;

  V8_WARN_UNUSED_RESULT bool SlowRefillLinearAllocationArea(
      int size_in_bytes) override;
};

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
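
// Usage sketch (illustrative only): a parallel compaction task would own one
// collection and pick the target space per evacuated object, e.g.
//
//   CompactionSpaceCollection compaction_spaces(heap);
//   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);
//
// Both spaces are task-local (is_local() returns true); their pages are
// merged back into the heap's regular spaces once the task is done.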

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  explicit OldSpace(Heap* heap) : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE) {}
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from the OS.
  explicit CodeSpace(Heap* heap) : PagedSpace(heap, CODE_SPACE, EXECUTABLE) {}
};

// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())
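
// For example (illustrative; the actual call sites live in the .cc files),
// the new space can check its own allocation info against the active
// semispace:
//
//   DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);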

// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  explicit MapSpace(Heap* heap) : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
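
  // For illustration (values assumed, not taken from this header): if
  // Map::kSize were 88 bytes, which is not a power of two, then
  // RoundSizeDownToObjectAlignment(200) would return (200 / 88) * 88 == 176.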

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject* obj) override;
#endif
};

// -----------------------------------------------------------------------------
// Read-only space for all immortal, immovable, and immutable objects

class ReadOnlySpace : public PagedSpace {
 public:
  class WritableScope {
   public:
    explicit WritableScope(ReadOnlySpace* space) : space_(space) {
      space_->MarkAsReadWrite();
    }

    ~WritableScope() { space_->MarkAsReadOnly(); }

   private:
    ReadOnlySpace* space_;
  };
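
  // Usage sketch (illustrative only; `read_only_space` is a placeholder):
  //
  //   {
  //     ReadOnlySpace::WritableScope scope(read_only_space);
  //     // ... mutate objects in the read-only space ...
  //   }  // the destructor marks the pages read-only again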

  explicit ReadOnlySpace(Heap* heap);

  bool writable() const { return !is_marked_read_only_; }

  void ClearStringPaddingIfNeeded();
  void MarkAsReadOnly();

 private:
  void MarkAsReadWrite();
  void SetPermissionsForPages(PageAllocator::Permission access);

  bool is_marked_read_only_ = false;

  // String padding must be cleared just before serialization and therefore the
  // string padding in the space will already have been cleared if the space was
  // deserialized.
  bool is_string_padding_cleared_;
};

// -----------------------------------------------------------------------------
// Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
// managed by the large object space. A large object is allocated from the OS
// heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
// A large object always starts at offset Page::kObjectStartOffset within its
// page. Large objects do not move during garbage collections.

class LargeObjectSpace : public Space {
 public:
  typedef LargePageIterator iterator;

  explicit LargeObjectSpace(Heap* heap);
  LargeObjectSpace(Heap* heap, AllocationSpace id);

  ~LargeObjectSpace() override { TearDown(); }

  // Releases internal resources and frees objects in this space.
  void TearDown();

  static size_t ObjectSizeFor(size_t chunk_size) {
    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
  }
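
  // For illustration (assuming the usual 512 KB Page::kPageSize): a 1 MB
  // chunk leaves 1 MB - 512 KB - Page::kObjectStartOffset bytes for the
  // object itself, and any chunk no larger than the overhead yields 0.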

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
                                                     Executability executable);

  // Available bytes for objects in this space.
  size_t Available() override;

  size_t Size() override { return size_; }
  size_t SizeOfObjects() override { return objects_size_; }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  int PageCount() { return page_count_; }

  // Finds an object for a given address; returns a Smi if it is not found.
  // The function iterates through all objects in this space and may be slow.
  Object* FindObject(Address a);

  // Takes the chunk_map_mutex_ and calls FindPage after that.
  LargePage* FindPageThreadSafe(Address a);

  // Finds a large object page containing the given address; returns nullptr
  // if such a page doesn't exist.
  LargePage* FindPage(Address a);

  // Clears the marking state of live objects.
  void ClearMarkingStateOfLiveObjects();

  // Frees unmarked objects.
  void FreeUnmarkedObjects();

  void InsertChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page);
  void RemoveChunkMapEntries(LargePage* page, Address free_start);

  // Checks whether a heap object is in this space; O(1).
  bool Contains(HeapObject* obj);
  // Checks whether an address is in the object area in this space. Iterates
  // through all objects in the space and may be slow.
  bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }

  // Checks whether the space is empty.
  bool IsEmpty() { return first_page() == nullptr; }

  LargePage* first_page() {
    return reinterpret_cast<LargePage*>(Space::first_page());
  }

  // Collect code statistics.
  void CollectCodeStatistics();

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  std::unique_ptr<ObjectIterator> GetObjectIterator() override;

  base::Mutex* chunk_map_mutex() { return &chunk_map_mutex_; }

#ifdef VERIFY_HEAP
  virtual void Verify(Isolate* isolate);
#endif

#ifdef DEBUG
  void Print() override;
#endif

 protected:
  LargePage* AllocateLargePage(int object_size, Executability executable);

 private:
  size_t size_;          // allocated bytes
  int page_count_;       // number of chunks
  size_t objects_size_;  // size of objects

  // The chunk_map_mutex_ has to be used when the chunk map is accessed
  // concurrently.
  base::Mutex chunk_map_mutex_;

  // Maps page-aligned addresses to their corresponding LargePage.
  std::unordered_map<Address, LargePage*> chunk_map_;

  friend class LargeObjectIterator;
};

class NewLargeObjectSpace : public LargeObjectSpace {
 public:
  explicit NewLargeObjectSpace(Heap* heap);

  V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size);

  // Available bytes for objects in this space.
  size_t Available() override;
};

class LargeObjectIterator : public ObjectIterator {
 public:
  explicit LargeObjectIterator(LargeObjectSpace* space);

  HeapObject* Next() override;

 private:
  LargePage* current_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class MemoryChunkIterator BASE_EMBEDDED {
 public:
  inline explicit MemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
};
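
// Usage sketch (illustrative only): walk every chunk the iterator covers.
//
//   MemoryChunkIterator it(heap);
//   while (MemoryChunk* chunk = it.next()) {
//     // ... process |chunk| ...
//   }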

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_