1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #ifndef V8_HEAP_SPACES_H_
6 #define V8_HEAP_SPACES_H_
7
8 #include <list>
9 #include <memory>
10 #include <unordered_set>
11
12 #include "src/allocation.h"
13 #include "src/base/atomic-utils.h"
14 #include "src/base/atomicops.h"
15 #include "src/base/bits.h"
16 #include "src/base/hashmap.h"
17 #include "src/base/platform/mutex.h"
18 #include "src/flags.h"
19 #include "src/globals.h"
20 #include "src/heap/heap.h"
21 #include "src/heap/marking.h"
22 #include "src/list.h"
23 #include "src/objects.h"
24 #include "src/utils.h"
25
26 namespace v8 {
27 namespace internal {
28
29 class AllocationInfo;
30 class AllocationObserver;
31 class CompactionSpace;
32 class CompactionSpaceCollection;
33 class FreeList;
34 class Isolate;
35 class LocalArrayBufferTracker;
36 class MemoryAllocator;
37 class MemoryChunk;
38 class Page;
39 class PagedSpace;
40 class SemiSpace;
41 class SkipList;
42 class SlotsBuffer;
43 class SlotSet;
44 class TypedSlotSet;
45 class Space;
46
47 // -----------------------------------------------------------------------------
48 // Heap structures:
49 //
50 // A JS heap consists of a young generation, an old generation, and a large
51 // object space. The young generation is divided into two semispaces. A
52 // scavenger implements Cheney's copying algorithm. The old generation is
53 // separated into a map space and an old object space. The map space contains
54 // all (and only) map objects, the rest of old objects go into the old space.
55 // The old generation is collected by a mark-sweep-compact collector.
56 //
57 // The semispaces of the young generation are contiguous. The old and map
58 // spaces consist of a list of pages. A page has a page header and an object
59 // area.
60 //
61 // There is a separate large object space for objects larger than
62 // kMaxRegularHeapObjectSize, so that they do not have to move during
63 // collection. The large object space is paged. Pages in large object space
64 // may be larger than the page size.
65 //
66 // A store-buffer based write barrier is used to keep track of intergenerational
67 // references. See heap/store-buffer.h.
68 //
69 // During scavenges and mark-sweep collections we sometimes (after a store
70 // buffer overflow) iterate intergenerational pointers without decoding heap
71 // object maps, so if the page belongs to old space or large object space
72 // it is essential to guarantee that the page does not contain any
73 // garbage pointers to new space: every pointer-aligned word which satisfies
74 // the Heap::InNewSpace() predicate must be a pointer to a live heap object in
75 // new space. Thus objects in old space and large object spaces should have a
76 // special layout (e.g. no bare integer fields). This requirement does not
77 // apply to map space, which is iterated in a special fashion. However, we still
78 // require pointer fields of dead maps to be cleaned.
79 //
80 // To enable lazy cleaning of old space pages we can mark chunks of the page
81 // as being garbage. Garbage sections are marked with a special map. These
82 // sections are skipped when scanning the page, even if we are otherwise
83 // scanning without regard for object boundaries. Garbage sections are chained
84 // together to form a free list after a GC. Garbage sections created outside
85 // of GCs by object truncation etc. may not be in the free list chain. Very
86 // small free spaces are ignored, they need only be cleaned of bogus pointers
87 // into new space.
88 //
89 // Each page may have up to one special garbage section. The start of this
90 // section is denoted by the top field in the space. The end of the section
91 // is denoted by the limit field in the space. This special garbage section
92 // is not marked with a free space map in the data. The point of this section
93 // is to enable linear allocation without having to constantly update the byte
94 // array every time the top field is updated and a new object is created. The
95 // special garbage section is not in the chain of garbage sections.
96 //
97 // Since the top and limit fields are in the space, not the page, only one page
98 // has a special garbage section, and if the top and limit are equal then there
99 // is no special garbage section.
100
101 // Some assertion macros used in the debugging mode.
102
103 #define DCHECK_PAGE_ALIGNED(address) \
104 DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
105
106 #define DCHECK_OBJECT_ALIGNED(address) \
107 DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
108
109 #define DCHECK_OBJECT_SIZE(size) \
110 DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))
111
112 #define DCHECK_CODEOBJECT_SIZE(size, code_space) \
113 DCHECK((0 < size) && (size <= code_space->AreaSize()))
114
115 #define DCHECK_PAGE_OFFSET(offset) \
116 DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
117
118 enum FreeListCategoryType {
119 kTiniest,
120 kTiny,
121 kSmall,
122 kMedium,
123 kLarge,
124 kHuge,
125
126 kFirstCategory = kTiniest,
127 kLastCategory = kHuge,
128 kNumberOfCategories = kLastCategory + 1,
129 kInvalidCategory
130 };
131
132 enum FreeMode { kLinkCategory, kDoNotLinkCategory };
133
134 // A free list category maintains a linked list of free memory blocks.
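// Illustrative usage sketch (not part of this header's contract; |category|,
// |free_space| and |size_in_bytes| are hypothetical): a sweeper hands memory
// back to a category, and the allocator later takes a node out again:
//
//   category->Free(free_space, size_in_bytes, kLinkCategory);
//   ...
//   size_t node_size;
//   FreeSpace* node = category->PickNodeFromList(&node_size);
//   if (node == nullptr) { /* the category is empty */ }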
135 class FreeListCategory {
136 public:
137 static const int kSize = kIntSize + // FreeListCategoryType type_
138 kIntSize + // padding for type_
139 kSizetSize + // size_t available_
140 kPointerSize + // FreeSpace* top_
141 kPointerSize + // FreeListCategory* prev_
142 kPointerSize; // FreeListCategory* next_
143
144 FreeListCategory()
145 : type_(kInvalidCategory),
146 available_(0),
147 top_(nullptr),
148 prev_(nullptr),
149 next_(nullptr) {}
150
151 void Initialize(FreeListCategoryType type) {
152 type_ = type;
153 available_ = 0;
154 top_ = nullptr;
155 prev_ = nullptr;
156 next_ = nullptr;
157 }
158
159 void Invalidate();
160
161 void Reset();
162
163 void ResetStats() { Reset(); }
164
165 void RepairFreeList(Heap* heap);
166
167 // Relinks the category into the currently owning free list. Requires that the
168 // category is currently unlinked.
169 void Relink();
170
171 bool Free(FreeSpace* node, size_t size_in_bytes, FreeMode mode);
172
173 // Picks a node from the list and stores its size in |node_size|. Returns
174 // nullptr if the category is empty.
175 FreeSpace* PickNodeFromList(size_t* node_size);
176
177 // Performs a single try to pick a node of at least |minimum_size| from the
178 // category. Stores the actual size in |node_size|. Returns nullptr if no
179 // node is found.
180 FreeSpace* TryPickNodeFromList(size_t minimum_size, size_t* node_size);
181
182 // Picks a node of at least |minimum_size| from the category. Stores the
183 // actual size in |node_size|. Returns nullptr if no node is found.
184 FreeSpace* SearchForNodeInList(size_t minimum_size, size_t* node_size);
185
186 inline FreeList* owner();
187 inline bool is_linked();
188 bool is_empty() { return top() == nullptr; }
189 size_t available() const { return available_; }
190
191 #ifdef DEBUG
192 size_t SumFreeList();
193 int FreeListLength();
194 #endif
195
196 private:
197 // For debug builds we accurately compute free list lengths up until
198 // {kVeryLongFreeList} by manually walking the list.
199 static const int kVeryLongFreeList = 500;
200
201 inline Page* page();
202
203 FreeSpace* top() { return top_; }
204 void set_top(FreeSpace* top) { top_ = top; }
205 FreeListCategory* prev() { return prev_; }
206 void set_prev(FreeListCategory* prev) { prev_ = prev; }
207 FreeListCategory* next() { return next_; }
208 void set_next(FreeListCategory* next) { next_ = next; }
209
210 // |type_|: The type of this free list category.
211 FreeListCategoryType type_;
212
213 // |available_|: Total available bytes in all blocks of this free list
214 // category.
215 size_t available_;
216
217 // |top_|: Points to the top FreeSpace* in the free list category.
218 FreeSpace* top_;
219
220 FreeListCategory* prev_;
221 FreeListCategory* next_;
222
223 friend class FreeList;
224 friend class PagedSpace;
225 };
226
227 // MemoryChunk represents a memory region owned by a specific space.
228 // It is divided into the header and the body. Chunk start is always
229 // 1MB aligned. Start of the body is aligned so it can accommodate
230 // any heap object.
231 class MemoryChunk {
232 public:
233 enum Flag {
234 NO_FLAGS = 0u,
235 IS_EXECUTABLE = 1u << 0,
236 POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
237 POINTERS_FROM_HERE_ARE_INTERESTING = 1u << 2,
238 // A page in new space has one of the next two flags set.
239 IN_FROM_SPACE = 1u << 3,
240 IN_TO_SPACE = 1u << 4,
241 NEW_SPACE_BELOW_AGE_MARK = 1u << 5,
242 EVACUATION_CANDIDATE = 1u << 6,
243 NEVER_EVACUATE = 1u << 7,
244
245 // Large objects can have a progress bar in their page header. These objects
246 // are scanned in increments and will be kept black while being scanned.
247 // Even if the mutator writes to them they will be kept black and a white
248 // to grey transition is performed in the value.
249 HAS_PROGRESS_BAR = 1u << 8,
250
251 // |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
252 // from new to old space during evacuation.
253 PAGE_NEW_OLD_PROMOTION = 1u << 9,
254
255 // |PAGE_NEW_NEW_PROMOTION|: A page tagged with this flag has been moved
256 // within the new space during evacuation.
257 PAGE_NEW_NEW_PROMOTION = 1u << 10,
258
259 // This flag is intended to be used for testing. Works only when both
260 // FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
261 // are set. It forces the page to become an evacuation candidate at next
262 // candidates selection cycle.
263 FORCE_EVACUATION_CANDIDATE_FOR_TESTING = 1u << 11,
264
265 // This flag is intended to be used for testing.
266 NEVER_ALLOCATE_ON_PAGE = 1u << 12,
267
268 // The memory chunk is already logically freed, however the actual freeing
269 // still has to be performed.
270 PRE_FREED = 1u << 13,
271
272 // |POOLED|: When actually freeing this chunk, only uncommit and do not
273 // give up the reservation as we still reuse the chunk at some point.
274 POOLED = 1u << 14,
275
276 // |COMPACTION_WAS_ABORTED|: Indicates that the compaction in this page
277 // has been aborted and needs special handling by the sweeper.
278 COMPACTION_WAS_ABORTED = 1u << 15,
279
280 // |COMPACTION_WAS_ABORTED_FOR_TESTING|: During stress testing evacuation
281 // on pages is sometimes aborted. The flag is used to avoid repeatedly
282 // triggering on the same page.
283 COMPACTION_WAS_ABORTED_FOR_TESTING = 1u << 16,
284
285 // |ANCHOR|: Flag is set if page is an anchor.
286 ANCHOR = 1u << 17,
287 };
288 typedef base::Flags<Flag, uintptr_t> Flags;
289
290 static const int kPointersToHereAreInterestingMask =
291 POINTERS_TO_HERE_ARE_INTERESTING;
292
293 static const int kPointersFromHereAreInterestingMask =
294 POINTERS_FROM_HERE_ARE_INTERESTING;
295
296 static const int kEvacuationCandidateMask = EVACUATION_CANDIDATE;
297
298 static const int kIsInNewSpaceMask = IN_FROM_SPACE | IN_TO_SPACE;
299
300 static const int kSkipEvacuationSlotsRecordingMask =
301 kEvacuationCandidateMask | kIsInNewSpaceMask;
302
303 // |kSweepingDone|: The page state when sweeping is complete or sweeping must
304 // not be performed on that page. Sweeper threads that are done with their
305 // work will set this value and not touch the page anymore.
306 // |kSweepingPending|: This page is ready for parallel sweeping.
307 // |kSweepingInProgress|: This page is currently swept by a sweeper thread.
308 enum ConcurrentSweepingState {
309 kSweepingDone,
310 kSweepingPending,
311 kSweepingInProgress,
312 };
313
314 static const intptr_t kAlignment =
315 (static_cast<uintptr_t>(1) << kPageSizeBits);
316
317 static const intptr_t kAlignmentMask = kAlignment - 1;
318
319 static const intptr_t kSizeOffset = 0;
320 static const intptr_t kFlagsOffset = kSizeOffset + kSizetSize;
321 static const intptr_t kAreaStartOffset = kFlagsOffset + kIntptrSize;
322 static const intptr_t kAreaEndOffset = kAreaStartOffset + kPointerSize;
323 static const intptr_t kReservationOffset = kAreaEndOffset + kPointerSize;
324 static const intptr_t kOwnerOffset = kReservationOffset + 2 * kPointerSize;
325
326 static const size_t kMinHeaderSize =
327 kSizeOffset + kSizetSize // size_t size
328 + kIntptrSize // Flags flags_
329 + kPointerSize // Address area_start_
330 + kPointerSize // Address area_end_
331 + 2 * kPointerSize // base::VirtualMemory reservation_
332 + kPointerSize // Address owner_
333 + kPointerSize // Heap* heap_
334 + kIntSize // int progress_bar_
335 + kIntSize // int live_bytes_count_
336 + kPointerSize // SlotSet* old_to_new_slots_
337 + kPointerSize // SlotSet* old_to_old_slots_
338 + kPointerSize // TypedSlotSet* typed_old_to_new_slots_
339 + kPointerSize // TypedSlotSet* typed_old_to_old_slots_
340 + kPointerSize // SkipList* skip_list_
341 + kPointerSize // AtomicValue high_water_mark_
342 + kPointerSize // base::Mutex* mutex_
343 + kPointerSize // base::AtomicWord concurrent_sweeping_
344 + 2 * kSizetSize // AtomicNumber free-list statistics
345 + kPointerSize // AtomicValue next_chunk_
346 + kPointerSize // AtomicValue prev_chunk_
347 // FreeListCategory categories_[kNumberOfCategories]
348 + FreeListCategory::kSize * kNumberOfCategories +
349 kPointerSize; // LocalArrayBufferTracker* local_tracker_
350
351 // We add some more space to the computed header size to account for missing
352 // alignment requirements in our computation.
353 // Try to get kHeaderSize properly aligned on 32-bit and 64-bit machines.
354 static const size_t kHeaderSize = kMinHeaderSize;
355
356 static const int kBodyOffset =
357 CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
358
359 // The start offset of the object area in a page. Aligned to both maps and
360 // code alignment to be suitable for both. Also aligned to 32 words because
361 // the marking bitmap is arranged in 32 bit chunks.
362 static const int kObjectStartAlignment = 32 * kPointerSize;
363 static const int kObjectStartOffset =
364 kBodyOffset - 1 +
365 (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
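// In other words, kObjectStartOffset is kBodyOffset rounded up to the next
// multiple of kObjectStartAlignment. For example (illustrative numbers only),
// with kObjectStartAlignment == 256 and kBodyOffset == 200 the result is 256,
// and a kBodyOffset that is already a multiple of 256 is left unchanged.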
366
367 // Page size in bytes. This must be a multiple of the OS page size.
368 static const int kPageSize = 1 << kPageSizeBits;
369 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
370
371 static const int kAllocatableMemory = kPageSize - kObjectStartOffset;
372
373 static inline void IncrementLiveBytes(HeapObject* object, int by);
374
375 // Only works if the pointer is in the first kPageSize of the MemoryChunk.
376 static MemoryChunk* FromAddress(Address a) {
377 return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
378 }
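// Illustrative sketch: since chunks are kAlignment-aligned, masking any
// interior address yields the owning chunk, e.g.
//   MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
//   if (chunk->InNewSpace()) { /* object lives in the young generation */ }
// (|object| is a hypothetical HeapObject*.)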
379
380 static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
381
382 static inline void UpdateHighWaterMark(Address mark) {
383 if (mark == nullptr) return;
384 // Need to subtract one from the mark because when a chunk is full the
385 // top points to the next address after the chunk, which effectively belongs
386 // to another chunk. See the comment to Page::FromTopOrLimit.
387 MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
388 intptr_t new_mark = static_cast<intptr_t>(mark - chunk->address());
389 intptr_t old_mark = 0;
390 do {
391 old_mark = chunk->high_water_mark_.Value();
392 } while ((new_mark > old_mark) &&
393 !chunk->high_water_mark_.TrySetValue(old_mark, new_mark));
394 }
395
396 static bool IsValid(MemoryChunk* chunk) { return chunk != nullptr; }
397
398 Address address() { return reinterpret_cast<Address>(this); }
399
400 base::Mutex* mutex() { return mutex_; }
401
402 bool Contains(Address addr) {
403 return addr >= area_start() && addr < area_end();
404 }
405
406 // Checks whether |addr| can be a limit of addresses in this page. It's a
407 // limit if it's in the page, or if it's just after the last byte of the page.
408 bool ContainsLimit(Address addr) {
409 return addr >= area_start() && addr <= area_end();
410 }
411
412 base::AtomicValue<ConcurrentSweepingState>& concurrent_sweeping_state() {
413 return concurrent_sweeping_;
414 }
415
416 bool SweepingDone() {
417 return concurrent_sweeping_state().Value() == kSweepingDone;
418 }
419
420 // Manage live byte count, i.e., count of bytes in black objects.
421 inline void ResetLiveBytes();
422 inline void IncrementLiveBytes(int by);
423
424 int LiveBytes() {
425 DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
426 return live_byte_count_;
427 }
428
429 void SetLiveBytes(int live_bytes) {
430 DCHECK_GE(live_bytes, 0);
431 DCHECK_LE(static_cast<size_t>(live_bytes), size_);
432 live_byte_count_ = live_bytes;
433 }
434
435 size_t size() const { return size_; }
436 void set_size(size_t size) { size_ = size; }
437
438 inline Heap* heap() const { return heap_; }
439
440 inline SkipList* skip_list() { return skip_list_; }
441
442 inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
443
444 inline SlotSet* old_to_new_slots() { return old_to_new_slots_.Value(); }
445 inline SlotSet* old_to_old_slots() { return old_to_old_slots_; }
446 inline TypedSlotSet* typed_old_to_new_slots() {
447 return typed_old_to_new_slots_.Value();
448 }
449 inline TypedSlotSet* typed_old_to_old_slots() {
450 return typed_old_to_old_slots_;
451 }
452 inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
453
454 V8_EXPORT_PRIVATE void AllocateOldToNewSlots();
455 void ReleaseOldToNewSlots();
456 V8_EXPORT_PRIVATE void AllocateOldToOldSlots();
457 void ReleaseOldToOldSlots();
458 void AllocateTypedOldToNewSlots();
459 void ReleaseTypedOldToNewSlots();
460 void AllocateTypedOldToOldSlots();
461 void ReleaseTypedOldToOldSlots();
462 void AllocateLocalTracker();
463 void ReleaseLocalTracker();
464
465 Address area_start() { return area_start_; }
466 Address area_end() { return area_end_; }
467 size_t area_size() { return static_cast<size_t>(area_end() - area_start()); }
468
469 bool CommitArea(size_t requested);
470
471 // Approximate amount of physical memory committed for this chunk.
472 size_t CommittedPhysicalMemory();
473
474 Address HighWaterMark() { return address() + high_water_mark_.Value(); }
475
476 int progress_bar() {
477 DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
478 return progress_bar_;
479 }
480
481 void set_progress_bar(int progress_bar) {
482 DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
483 progress_bar_ = progress_bar;
484 }
485
486 void ResetProgressBar() {
487 if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
488 set_progress_bar(0);
489 }
490 }
491
492 inline Bitmap* markbits() {
493 return Bitmap::FromAddress(address() + kHeaderSize);
494 }
495
496 inline uint32_t AddressToMarkbitIndex(Address addr) {
497 return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
498 }
499
500 inline Address MarkbitIndexToAddress(uint32_t index) {
501 return this->address() + (index << kPointerSizeLog2);
502 }
503
504 void ClearLiveness();
505
506 void PrintMarkbits() { markbits()->Print(); }
507
508 void SetFlag(Flag flag) { flags_ |= flag; }
509 void ClearFlag(Flag flag) { flags_ &= ~Flags(flag); }
510 bool IsFlagSet(Flag flag) { return flags_ & flag; }
511
512 // Set or clear multiple flags at a time. The flags in the mask are set to
513 // the value in "flags", the rest retain the current value in |flags_|.
514 void SetFlags(uintptr_t flags, uintptr_t mask) {
515 flags_ = (flags_ & ~Flags(mask)) | (Flags(flags) & Flags(mask));
516 }
517
518 // Return all current flags.
519 uintptr_t GetFlags() { return flags_; }
520
521 bool NeverEvacuate() { return IsFlagSet(NEVER_EVACUATE); }
522
523 void MarkNeverEvacuate() { SetFlag(NEVER_EVACUATE); }
524
525 bool IsEvacuationCandidate() {
526 DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
527 return IsFlagSet(EVACUATION_CANDIDATE);
528 }
529
530 bool CanAllocate() {
531 return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
532 }
533
534 bool ShouldSkipEvacuationSlotRecording() {
535 return ((flags_ & kSkipEvacuationSlotsRecordingMask) != 0) &&
536 !IsFlagSet(COMPACTION_WAS_ABORTED);
537 }
538
539 Executability executable() {
540 return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
541 }
542
543 bool InNewSpace() { return (flags_ & kIsInNewSpaceMask) != 0; }
544
545 bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
546
547 bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
548
549 MemoryChunk* next_chunk() { return next_chunk_.Value(); }
550
551 MemoryChunk* prev_chunk() { return prev_chunk_.Value(); }
552
553 void set_next_chunk(MemoryChunk* next) { next_chunk_.SetValue(next); }
554
555 void set_prev_chunk(MemoryChunk* prev) { prev_chunk_.SetValue(prev); }
556
557 Space* owner() const {
558 intptr_t owner_value = base::NoBarrierAtomicValue<intptr_t>::FromAddress(
559 const_cast<Address*>(&owner_))
560 ->Value();
561 if ((owner_value & kPageHeaderTagMask) == kPageHeaderTag) {
562 return reinterpret_cast<Space*>(owner_value - kPageHeaderTag);
563 } else {
564 return nullptr;
565 }
566 }
567
568 void set_owner(Space* space) {
569 DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
570 owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
571 DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
572 kPageHeaderTag);
573 }
574
575 bool HasPageHeader() { return owner() != nullptr; }
576
577 void InsertAfter(MemoryChunk* other);
578 void Unlink();
579
580 protected:
581 static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
582 Address area_start, Address area_end,
583 Executability executable, Space* owner,
584 base::VirtualMemory* reservation);
585
586 // Should be called when memory chunk is about to be freed.
587 void ReleaseAllocatedMemory();
588
589 base::VirtualMemory* reserved_memory() { return &reservation_; }
590
591 size_t size_;
592 Flags flags_;
593
594 // Start and end of allocatable memory on this chunk.
595 Address area_start_;
596 Address area_end_;
597
598 // If the chunk needs to remember its memory reservation, it is stored here.
599 base::VirtualMemory reservation_;
600
601 // The identity of the owning space. This is tagged as a failure pointer, but
602 // no failure can be in an object, so this can be distinguished from any entry
603 // in a fixed array.
604 Address owner_;
605
606 Heap* heap_;
607
608 // Used by the incremental marker to keep track of the scanning progress in
609 // large objects that have a progress bar and are scanned in increments.
610 int progress_bar_;
611
612 // Count of bytes marked black on page.
613 int live_byte_count_;
614
615 // A single slot set for small pages (of size kPageSize) or an array of slot
616 // sets for large pages. In the latter case the number of entries in the array
617 // is ceil(size() / kPageSize).
618 base::AtomicValue<SlotSet*> old_to_new_slots_;
619 SlotSet* old_to_old_slots_;
620 base::AtomicValue<TypedSlotSet*> typed_old_to_new_slots_;
621 TypedSlotSet* typed_old_to_old_slots_;
622
623 SkipList* skip_list_;
624
625 // Assuming the initial allocation on a page is sequential,
626 // counts the highest number of bytes ever allocated on the page.
627 base::AtomicValue<intptr_t> high_water_mark_;
628
629 base::Mutex* mutex_;
630
631 base::AtomicValue<ConcurrentSweepingState> concurrent_sweeping_;
632
633 // PagedSpace free-list statistics.
634 base::AtomicNumber<intptr_t> available_in_free_list_;
635 base::AtomicNumber<intptr_t> wasted_memory_;
636
637 // next_chunk_ holds a pointer of type MemoryChunk
638 base::AtomicValue<MemoryChunk*> next_chunk_;
639 // prev_chunk_ holds a pointer of type MemoryChunk
640 base::AtomicValue<MemoryChunk*> prev_chunk_;
641
642 FreeListCategory categories_[kNumberOfCategories];
643
644 LocalArrayBufferTracker* local_tracker_;
645
646 private:
647 void InitializeReservedMemory() { reservation_.Reset(); }
648
649 friend class MemoryAllocator;
650 friend class MemoryChunkValidator;
651 };
652
653 DEFINE_OPERATORS_FOR_FLAGS(MemoryChunk::Flags)
654
655 static_assert(kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory,
656 "kMaxRegularHeapObjectSize <= MemoryChunk::kAllocatableMemory");
657
658 // -----------------------------------------------------------------------------
659 // A page is a memory chunk of size 1MB. Large object pages may be larger.
660 //
661 // The only way to get a page pointer is by calling factory methods:
662 // Page* p = Page::FromAddress(addr); or
663 // Page* p = Page::FromTopOrLimit(top);
664 class Page : public MemoryChunk {
665 public:
666 static const intptr_t kCopyAllFlags = ~0;
667
668 // Page flags copied from from-space to to-space when flipping semispaces.
669 static const intptr_t kCopyOnFlipFlagsMask =
670 static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
671 static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
672
673 static inline Page* ConvertNewToOld(Page* old_page);
674
675 // Returns the page containing a given address. The address ranges
676 // from [page_addr .. page_addr + kPageSize[. This only works if the object
677 // is in fact in a page.
678 static Page* FromAddress(Address addr) {
679 return reinterpret_cast<Page*>(OffsetFrom(addr) & ~kPageAlignmentMask);
680 }
681
682 // Returns the page containing the address provided. The address can
683 // potentially point right after the page. To also be safe for tagged values
684 // we subtract a hole word. The valid address ranges from
685 // [page_addr + kObjectStartOffset .. page_addr + kPageSize + kPointerSize].
686 static Page* FromAllocationAreaAddress(Address address) {
687 return Page::FromAddress(address - kPointerSize);
688 }
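// Illustrative sketch (|info| is a hypothetical AllocationInfo): even when the
// allocation top sits exactly at the page end, subtracting kPointerSize maps
// it back onto the page that owns the allocation area:
//   Page* page = Page::FromAllocationAreaAddress(info.top());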
689
690 // Checks if address1 and address2 are on the same new space page.
691 static bool OnSamePage(Address address1, Address address2) {
692 return Page::FromAddress(address1) == Page::FromAddress(address2);
693 }
694
695 // Checks whether an address is page aligned.
696 static bool IsAlignedToPageSize(Address addr) {
697 return (OffsetFrom(addr) & kPageAlignmentMask) == 0;
698 }
699
700 static bool IsAtObjectStart(Address addr) {
701 return (reinterpret_cast<intptr_t>(addr) & kPageAlignmentMask) ==
702 kObjectStartOffset;
703 }
704
705 inline static Page* FromAnyPointerAddress(Heap* heap, Address addr);
706
707 // Create a Page object that is only used as an anchor for the doubly-linked
708 // list of real pages.
709 explicit Page(Space* owner) { InitializeAsAnchor(owner); }
710
711 inline void MarkNeverAllocateForTesting();
712 inline void MarkEvacuationCandidate();
713 inline void ClearEvacuationCandidate();
714
715 Page* next_page() { return static_cast<Page*>(next_chunk()); }
716 Page* prev_page() { return static_cast<Page*>(prev_chunk()); }
717 void set_next_page(Page* page) { set_next_chunk(page); }
718 void set_prev_page(Page* page) { set_prev_chunk(page); }
719
720 template <typename Callback>
721 inline void ForAllFreeListCategories(Callback callback) {
722 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
723 callback(&categories_[i]);
724 }
725 }
726
727 // Returns the offset of a given address relative to this page.
728 inline size_t Offset(Address a) { return static_cast<size_t>(a - address()); }
729
730 // Returns the address for a given offset in this page.
731 Address OffsetToAddress(size_t offset) {
732 DCHECK_PAGE_OFFSET(offset);
733 return address() + offset;
734 }
735
736 // WaitUntilSweepingCompleted only works when concurrent sweeping is in
737 // progress. In particular, when we know that right before this call a
738 // sweeper thread was sweeping this page.
739 void WaitUntilSweepingCompleted() {
740 mutex_->Lock();
741 mutex_->Unlock();
742 DCHECK(SweepingDone());
743 }
744
745 void ResetFreeListStatistics();
746
747 size_t AvailableInFreeList();
748
749 size_t LiveBytesFromFreeList() {
750 DCHECK_GE(area_size(), wasted_memory() + available_in_free_list());
751 return area_size() - wasted_memory() - available_in_free_list();
752 }
753
754 FreeListCategory* free_list_category(FreeListCategoryType type) {
755 return &categories_[type];
756 }
757
758 bool is_anchor() { return IsFlagSet(Page::ANCHOR); }
759
760 size_t wasted_memory() { return wasted_memory_.Value(); }
761 void add_wasted_memory(size_t waste) { wasted_memory_.Increment(waste); }
762 size_t available_in_free_list() { return available_in_free_list_.Value(); }
763 void add_available_in_free_list(size_t available) {
764 DCHECK_LE(available, area_size());
765 available_in_free_list_.Increment(available);
766 }
767 void remove_available_in_free_list(size_t available) {
768 DCHECK_LE(available, area_size());
769 DCHECK_GE(available_in_free_list(), available);
770 available_in_free_list_.Decrement(available);
771 }
772
773 size_t ShrinkToHighWaterMark();
774
775 void CreateBlackArea(Address start, Address end);
776
777 #ifdef DEBUG
778 void Print();
779 #endif // DEBUG
780
781 private:
782 enum InitializationMode { kFreeMemory, kDoNotFreeMemory };
783
784 template <InitializationMode mode = kFreeMemory>
785 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
786 Executability executable, PagedSpace* owner);
787 static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
788 Executability executable, SemiSpace* owner);
789
790 inline void InitializeFreeListCategories();
791
792 void InitializeAsAnchor(Space* owner);
793
794 friend class MemoryAllocator;
795 };
796
797 class LargePage : public MemoryChunk {
798 public:
799 HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
800
801 inline LargePage* next_page() {
802 return static_cast<LargePage*>(next_chunk());
803 }
804
805 inline void set_next_page(LargePage* page) { set_next_chunk(page); }
806
807 // Uncommit memory that is no longer in use by the object. If the object
808 // cannot be shrunk, 0 is returned.
809 Address GetAddressToShrink();
810
811 void ClearOutOfLiveRangeSlots(Address free_start);
812
813 // A limit to guarantee that we do not overflow typed slot offset in
814 // the old to old remembered set.
815 // Note that this limit is higher than what assembler already imposes on
816 // x64 and ia32 architectures.
817 static const int kMaxCodePageSize = 512 * MB;
818
819 private:
820 static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk,
821 Executability executable, Space* owner);
822
823 friend class MemoryAllocator;
824 };
825
826
827 // ----------------------------------------------------------------------------
828 // Space is the abstract superclass for all allocation spaces.
829 class Space : public Malloced {
830 public:
831 Space(Heap* heap, AllocationSpace id, Executability executable)
832 : allocation_observers_(new List<AllocationObserver*>()),
833 allocation_observers_paused_(false),
834 heap_(heap),
835 id_(id),
836 executable_(executable),
837 committed_(0),
838 max_committed_(0) {}
839
840 virtual ~Space() {}
841
842 Heap* heap() const { return heap_; }
843
844 // Does the space need executable memory?
845 Executability executable() { return executable_; }
846
847 // Identity used in error reporting.
848 AllocationSpace identity() { return id_; }
849
850 virtual void AddAllocationObserver(AllocationObserver* observer) {
851 allocation_observers_->Add(observer);
852 }
853
854 virtual void RemoveAllocationObserver(AllocationObserver* observer) {
855 bool removed = allocation_observers_->RemoveElement(observer);
856 USE(removed);
857 DCHECK(removed);
858 }
859
860 virtual void PauseAllocationObservers() {
861 allocation_observers_paused_ = true;
862 }
863
864 virtual void ResumeAllocationObservers() {
865 allocation_observers_paused_ = false;
866 }
867
868 void AllocationStep(Address soon_object, int size);
869
870 // Return the total amount of committed memory for this space, i.e., allocatable
871 // memory and page headers.
872 virtual size_t CommittedMemory() { return committed_; }
873
874 virtual size_t MaximumCommittedMemory() { return max_committed_; }
875
876 // Returns allocated size.
877 virtual size_t Size() = 0;
878
879 // Returns size of objects. Can differ from the allocated size
880 // (e.g. see LargeObjectSpace).
881 virtual size_t SizeOfObjects() { return Size(); }
882
883 // Approximate amount of physical memory committed for this space.
884 virtual size_t CommittedPhysicalMemory() = 0;
885
886 // Return the available bytes without growing.
887 virtual size_t Available() = 0;
888
889 virtual int RoundSizeDownToObjectAlignment(int size) {
890 if (id_ == CODE_SPACE) {
891 return RoundDown(size, kCodeAlignment);
892 } else {
893 return RoundDown(size, kPointerSize);
894 }
895 }
896
897 virtual std::unique_ptr<ObjectIterator> GetObjectIterator() = 0;
898
899 void AccountCommitted(size_t bytes) {
900 DCHECK_GE(committed_ + bytes, committed_);
901 committed_ += bytes;
902 if (committed_ > max_committed_) {
903 max_committed_ = committed_;
904 }
905 }
906
907 void AccountUncommitted(size_t bytes) {
908 DCHECK_GE(committed_, committed_ - bytes);
909 committed_ -= bytes;
910 }
911
912 #ifdef DEBUG
913 virtual void Print() = 0;
914 #endif
915
916 protected:
917 std::unique_ptr<List<AllocationObserver*>> allocation_observers_;
918 bool allocation_observers_paused_;
919
920 private:
921 Heap* heap_;
922 AllocationSpace id_;
923 Executability executable_;
924
925 // Keeps track of committed memory in a space.
926 size_t committed_;
927 size_t max_committed_;
928
929 DISALLOW_COPY_AND_ASSIGN(Space);
930 };
931
932
933 class MemoryChunkValidator {
934 // Computed offsets should match the compiler generated ones.
935 STATIC_ASSERT(MemoryChunk::kSizeOffset == offsetof(MemoryChunk, size_));
936
937 // Validate our estimates on the header size.
938 STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
939 STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
940 STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
941 };
942
943
944 // ----------------------------------------------------------------------------
945 // All heap objects containing executable code (code objects) must be allocated
946 // from a 2 GB range of memory, so that they can call each other using 32-bit
947 // displacements. This happens automatically on 32-bit platforms, where 32-bit
948 // displacements cover the entire 4GB virtual address space. On 64-bit
949 // platforms, we support this using the CodeRange object, which reserves and
950 // manages a range of virtual memory.
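// Rough usage sketch (hedged; error handling omitted, |requested| and |commit|
// are hypothetical sizes):
//   CodeRange code_range(isolate);
//   if (code_range.SetUp(requested)) {
//     size_t allocated = 0;
//     Address base =
//         code_range.AllocateRawMemory(requested, commit, &allocated);
//   }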
951 class CodeRange {
952 public:
953 explicit CodeRange(Isolate* isolate);
954 ~CodeRange() { TearDown(); }
955
956 // Reserves a range of virtual memory, but does not commit any of it.
957 // Can only be called once, at heap initialization time.
958 // Returns false on failure.
959 bool SetUp(size_t requested_size);
960
961 bool valid() { return code_range_ != NULL; }
962 Address start() {
963 DCHECK(valid());
964 return static_cast<Address>(code_range_->address());
965 }
966 size_t size() {
967 DCHECK(valid());
968 return code_range_->size();
969 }
970 bool contains(Address address) {
971 if (!valid()) return false;
972 Address start = static_cast<Address>(code_range_->address());
973 return start <= address && address < start + code_range_->size();
974 }
975
976 // Allocates a chunk of memory from the large-object portion of
977 // the code range. On platforms with no separate code range, should
978 // not be called.
979 MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
980 const size_t commit_size,
981 size_t* allocated);
982 bool CommitRawMemory(Address start, size_t length);
983 bool UncommitRawMemory(Address start, size_t length);
984 void FreeRawMemory(Address buf, size_t length);
985
986 private:
987 class FreeBlock {
988 public:
989 FreeBlock() : start(0), size(0) {}
990 FreeBlock(Address start_arg, size_t size_arg)
991 : start(start_arg), size(size_arg) {
992 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
993 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
994 }
995 FreeBlock(void* start_arg, size_t size_arg)
996 : start(static_cast<Address>(start_arg)), size(size_arg) {
997 DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
998 DCHECK(size >= static_cast<size_t>(Page::kPageSize));
999 }
1000
1001 Address start;
1002 size_t size;
1003 };
1004
1005 // Frees the range of virtual memory, and frees the data structures used to
1006 // manage it.
1007 void TearDown();
1008
1009 // Finds a block on the allocation list that contains at least the
1010 // requested amount of memory. If none is found, sorts and merges
1011 // the existing free memory blocks, and searches again.
1012 // If none can be found, returns false.
1013 bool GetNextAllocationBlock(size_t requested);
1014 // Compares the start addresses of two free blocks.
1015 static int CompareFreeBlockAddress(const FreeBlock* left,
1016 const FreeBlock* right);
1017 bool ReserveBlock(const size_t requested_size, FreeBlock* block);
1018 void ReleaseBlock(const FreeBlock* block);
1019
1020 Isolate* isolate_;
1021
1022 // The reserved range of virtual memory that all code objects are put in.
1023 base::VirtualMemory* code_range_;
1024
1025 // The global mutex guards free_list_ and allocation_list_ as GC threads may
1026 // access both lists concurrently with the main thread.
1027 base::Mutex code_range_mutex_;
1028
1029 // Freed blocks of memory are added to the free list. When the allocation
1030 // list is exhausted, the free list is sorted and merged to make the new
1031 // allocation list.
1032 List<FreeBlock> free_list_;
1033
1034 // Memory is allocated from the free blocks on the allocation list.
1035 // The block at current_allocation_block_index_ is the current block.
1036 List<FreeBlock> allocation_list_;
1037 int current_allocation_block_index_;
1038
1039 DISALLOW_COPY_AND_ASSIGN(CodeRange);
1040 };
1041
1042
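// A SkipList records, for each 8KB region of a page (kRegionSize below), the
// lowest object start address in that region, so that scanning a page can
// start at an object boundary. Typical (illustrative) update after placing
// |object| of |size| bytes:
//   SkipList::Update(object->address(), size);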
1043 class SkipList {
1044 public:
1045 SkipList() { Clear(); }
1046
1047 void Clear() {
1048 for (int idx = 0; idx < kSize; idx++) {
1049 starts_[idx] = reinterpret_cast<Address>(-1);
1050 }
1051 }
1052
1053 Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
1054
1055 void AddObject(Address addr, int size) {
1056 int start_region = RegionNumber(addr);
1057 int end_region = RegionNumber(addr + size - kPointerSize);
1058 for (int idx = start_region; idx <= end_region; idx++) {
1059 if (starts_[idx] > addr) {
1060 starts_[idx] = addr;
1061 } else {
1062 // In the first region, there may already be an object closer to the
1063 // start of the region. Do not change the start in that case. If this
1064 // is not the first region, you probably added overlapping objects.
1065 DCHECK_EQ(start_region, idx);
1066 }
1067 }
1068 }
1069
1070 static inline int RegionNumber(Address addr) {
1071 return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
1072 }
1073
1074 static void Update(Address addr, int size) {
1075 Page* page = Page::FromAddress(addr);
1076 SkipList* list = page->skip_list();
1077 if (list == NULL) {
1078 list = new SkipList();
1079 page->set_skip_list(list);
1080 }
1081
1082 list->AddObject(addr, size);
1083 }
1084
1085 private:
1086 static const int kRegionSizeLog2 = 13;
1087 static const int kRegionSize = 1 << kRegionSizeLog2;
1088 static const int kSize = Page::kPageSize / kRegionSize;
1089
1090 STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
1091
1092 Address starts_[kSize];
1093 };
1094
1095
1096 // ----------------------------------------------------------------------------
1097 // A space acquires chunks of memory from the operating system. The memory
1098 // allocator allocates and deallocates pages for the paged heap spaces and large
1099 // pages for large object space.
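// Rough usage sketch (hedged; |heap| and |old_space| are hypothetical and real
// callers usually go through the owning space rather than the allocator
// directly):
//   MemoryAllocator* allocator = heap->memory_allocator();
//   Page* page = allocator->AllocatePage(Page::kAllocatableMemory, old_space,
//                                        NOT_EXECUTABLE);
//   ...
//   allocator->Free<MemoryAllocator::kFull>(page);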
1100 class V8_EXPORT_PRIVATE MemoryAllocator {
1101 public:
1102 // Unmapper takes care of concurrently unmapping and uncommitting memory
1103 // chunks.
1104 class Unmapper {
1105 public:
1106 class UnmapFreeMemoryTask;
1107
1108 explicit Unmapper(MemoryAllocator* allocator)
1109 : allocator_(allocator),
1110 pending_unmapping_tasks_semaphore_(0),
1111 concurrent_unmapping_tasks_active_(0) {
1112 chunks_[kRegular].reserve(kReservedQueueingSlots);
1113 chunks_[kPooled].reserve(kReservedQueueingSlots);
1114 }
1115
1116 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1117 if ((chunk->size() == Page::kPageSize) &&
1118 (chunk->executable() != EXECUTABLE)) {
1119 AddMemoryChunkSafe<kRegular>(chunk);
1120 } else {
1121 AddMemoryChunkSafe<kNonRegular>(chunk);
1122 }
1123 }
1124
1125 MemoryChunk* TryGetPooledMemoryChunkSafe() {
1126 // Procedure:
1127 // (1) Try to get a chunk that was declared as pooled and already has
1128 // been uncommitted.
1129 // (2) Try to steal any memory chunk of kPageSize that would've been
1130 // unmapped.
1131 MemoryChunk* chunk = GetMemoryChunkSafe<kPooled>();
1132 if (chunk == nullptr) {
1133 chunk = GetMemoryChunkSafe<kRegular>();
1134 if (chunk != nullptr) {
1135 // For stolen chunks we need to manually free any allocated memory.
1136 chunk->ReleaseAllocatedMemory();
1137 }
1138 }
1139 return chunk;
1140 }
1141
1142 void FreeQueuedChunks();
1143 bool WaitUntilCompleted();
1144 void TearDown();
1145
1146 private:
1147 static const int kReservedQueueingSlots = 64;
1148
1149 enum ChunkQueueType {
1150 kRegular, // Pages of kPageSize that do not live in a CodeRange and
1151 // can thus be used for stealing.
1152 kNonRegular, // Large chunks and executable chunks.
1153 kPooled, // Pooled chunks, already uncommitted and ready for reuse.
1154 kNumberOfChunkQueues,
1155 };
1156
1157 enum class FreeMode {
1158 kUncommitPooled,
1159 kReleasePooled,
1160 };
1161
1162 template <ChunkQueueType type>
1163 void AddMemoryChunkSafe(MemoryChunk* chunk) {
1164 base::LockGuard<base::Mutex> guard(&mutex_);
1165 if (type != kRegular || allocator_->CanFreeMemoryChunk(chunk)) {
1166 chunks_[type].push_back(chunk);
1167 } else {
1168 DCHECK_EQ(type, kRegular);
1169 delayed_regular_chunks_.push_back(chunk);
1170 }
1171 }
1172
1173 template <ChunkQueueType type>
1174 MemoryChunk* GetMemoryChunkSafe() {
1175 base::LockGuard<base::Mutex> guard(&mutex_);
1176 if (chunks_[type].empty()) return nullptr;
1177 MemoryChunk* chunk = chunks_[type].back();
1178 chunks_[type].pop_back();
1179 return chunk;
1180 }
1181
1182 void ReconsiderDelayedChunks();
1183 template <FreeMode mode>
1184 void PerformFreeMemoryOnQueuedChunks();
1185
1186 base::Mutex mutex_;
1187 MemoryAllocator* allocator_;
1188 std::vector<MemoryChunk*> chunks_[kNumberOfChunkQueues];
1189 // Delayed chunks cannot be processed in the current unmapping cycle because
1190 // of dependencies such as an active sweeper.
1191 // See MemoryAllocator::CanFreeMemoryChunk.
1192 std::list<MemoryChunk*> delayed_regular_chunks_;
1193 base::Semaphore pending_unmapping_tasks_semaphore_;
1194 intptr_t concurrent_unmapping_tasks_active_;
1195
1196 friend class MemoryAllocator;
1197 };
1198
1199 enum AllocationMode {
1200 kRegular,
1201 kPooled,
1202 };
1203
1204 enum FreeMode {
1205 kFull,
1206 kAlreadyPooled,
1207 kPreFreeAndQueue,
1208 kPooledAndQueue,
1209 };
1210
1211 static size_t CodePageGuardStartOffset();
1212
1213 static size_t CodePageGuardSize();
1214
1215 static size_t CodePageAreaStartOffset();
1216
1217 static size_t CodePageAreaEndOffset();
1218
1219 static size_t CodePageAreaSize() {
1220 return CodePageAreaEndOffset() - CodePageAreaStartOffset();
1221 }
1222
1223 static size_t PageAreaSize(AllocationSpace space) {
1224 DCHECK_NE(LO_SPACE, space);
1225 return (space == CODE_SPACE) ? CodePageAreaSize()
1226 : Page::kAllocatableMemory;
1227 }
1228
1229 static intptr_t GetCommitPageSize();
1230
1231 explicit MemoryAllocator(Isolate* isolate);
1232
1233 // Initializes its internal bookkeeping structures.
1234 // Max capacity of the total space and executable memory limit.
1235 bool SetUp(size_t max_capacity, size_t capacity_executable,
1236 size_t code_range_size);
1237
1238 void TearDown();
1239
1240 // Allocates a Page from the allocator. AllocationMode is used to indicate
1241 // whether pooled allocation, which only works for MemoryChunk::kPageSize,
1242 // should be tried first.
1243 template <MemoryAllocator::AllocationMode alloc_mode = kRegular,
1244 typename SpaceType>
1245 Page* AllocatePage(size_t size, SpaceType* owner, Executability executable);
1246
1247 LargePage* AllocateLargePage(size_t size, LargeObjectSpace* owner,
1248 Executability executable);
1249
1250 template <MemoryAllocator::FreeMode mode = kFull>
1251 void Free(MemoryChunk* chunk);
1252
1253 bool CanFreeMemoryChunk(MemoryChunk* chunk);
1254
1255 // Returns allocated spaces in bytes.
1256 size_t Size() { return size_.Value(); }
1257
1258 // Returns allocated executable spaces in bytes.
1259 size_t SizeExecutable() { return size_executable_.Value(); }
1260
1261 // Returns the maximum available bytes of heaps.
1262 size_t Available() {
1263 const size_t size = Size();
1264 return capacity_ < size ? 0 : capacity_ - size;
1265 }
1266
1267 // Returns the maximum available executable bytes of heaps.
1268 size_t AvailableExecutable() {
1269 const size_t executable_size = SizeExecutable();
1270 if (capacity_executable_ < executable_size) return 0;
1271 return capacity_executable_ - executable_size;
1272 }
1273
1274 // Returns maximum available bytes that the old space can have.
1275 size_t MaxAvailable() {
1276 return (Available() / Page::kPageSize) * Page::kAllocatableMemory;
1277 }
1278
1279 // Returns an indication of whether a pointer is in a space that has
1280 // been allocated by this MemoryAllocator.
1281 V8_INLINE bool IsOutsideAllocatedSpace(const void* address) {
1282 return address < lowest_ever_allocated_.Value() ||
1283 address >= highest_ever_allocated_.Value();
1284 }
1285
1286 // Returns a MemoryChunk in which the memory region from commit_area_size to
1287 // reserve_area_size of the chunk area is reserved but not committed; it
1288 // could be committed later by calling MemoryChunk::CommitArea.
1289 MemoryChunk* AllocateChunk(size_t reserve_area_size, size_t commit_area_size,
1290 Executability executable, Space* space);
1291
1292 void ShrinkChunk(MemoryChunk* chunk, size_t bytes_to_shrink);
1293
1294 Address ReserveAlignedMemory(size_t requested, size_t alignment,
1295 base::VirtualMemory* controller);
1296 Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
1297 size_t alignment, Executability executable,
1298 base::VirtualMemory* controller);
1299
1300 bool CommitMemory(Address addr, size_t size, Executability executable);
1301
1302 void FreeMemory(base::VirtualMemory* reservation, Executability executable);
1303 void PartialFreeMemory(MemoryChunk* chunk, Address start_free);
1304 void FreeMemory(Address addr, size_t size, Executability executable);
1305
1306 // Commit a contiguous block of memory from the initial chunk. Assumes that
1307 // the address is not NULL, the size is greater than zero, and that the
1308 // block is contained in the initial chunk. Returns true if it succeeded
1309 // and false otherwise.
1310 bool CommitBlock(Address start, size_t size, Executability executable);
1311
1312 // Uncommit a contiguous block of memory [start..(start+size)[.
1313 // start is not NULL, the size is greater than zero, and the
1314 // block is contained in the initial chunk. Returns true if it succeeded
1315 // and false otherwise.
1316 bool UncommitBlock(Address start, size_t size);
1317
1318 // Zaps a contiguous block of memory [start..(start+size)[ thus
1319 // filling it up with a recognizable non-NULL bit pattern.
1320 void ZapBlock(Address start, size_t size);
1321
1322 MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
1323 Address start, size_t commit_size,
1324 size_t reserved_size);
1325
1326 CodeRange* code_range() { return code_range_; }
1327 Unmapper* unmapper() { return &unmapper_; }
1328
1329 #ifdef DEBUG
1330 // Reports statistic info of the space.
1331 void ReportStatistics();
1332 #endif
1333
1334 private:
1335 // PreFree logically frees the object, i.e., it takes care of the size
1336 // bookkeeping and calls the allocation callback.
1337 void PreFreeMemory(MemoryChunk* chunk);
1338
1339 // FreeMemory can be called concurrently when PreFree was executed before.
1340 void PerformFreeMemory(MemoryChunk* chunk);
1341
1342 // See AllocatePage for public interface. Note that currently we only support
1343 // pools for NOT_EXECUTABLE pages of size MemoryChunk::kPageSize.
1344 template <typename SpaceType>
1345 MemoryChunk* AllocatePagePooled(SpaceType* owner);
1346
1347 // Initializes pages in a chunk. Returns the first page address.
1348 // This function and GetChunkId() are provided for the mark-compact
1349 // collector to rebuild page headers in the from space, which is
1350 // used as a marking stack and its page headers are destroyed.
1351 Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
1352 PagedSpace* owner);
1353
1354 void UpdateAllocatedSpaceLimits(void* low, void* high) {
1355 // The use of atomic primitives does not guarantee correctness (wrt.
1356 // desired semantics) by default. The loop here ensures that we update the
1357 // values only if they did not change in between.
1358 void* ptr = nullptr;
1359 do {
1360 ptr = lowest_ever_allocated_.Value();
1361 } while ((low < ptr) && !lowest_ever_allocated_.TrySetValue(ptr, low));
1362 do {
1363 ptr = highest_ever_allocated_.Value();
1364 } while ((high > ptr) && !highest_ever_allocated_.TrySetValue(ptr, high));
1365 }
1366
1367 Isolate* isolate_;
1368 CodeRange* code_range_;
1369
1370 // Maximum space size in bytes.
1371 size_t capacity_;
1372 // Maximum subset of capacity_ that can be executable
1373 size_t capacity_executable_;
1374
1375 // Allocated space size in bytes.
1376 base::AtomicNumber<size_t> size_;
1377 // Allocated executable space size in bytes.
1378 base::AtomicNumber<size_t> size_executable_;
1379
1380 // We keep the lowest and highest addresses allocated as a quick way
1381 // of determining that pointers are outside the heap. The estimate is
1382 // conservative, i.e. not all addresses in 'allocated' space are allocated
1383 // to our heap. The range is [lowest, highest[, inclusive on the low end
1384 // and exclusive on the high end.
1385 base::AtomicValue<void*> lowest_ever_allocated_;
1386 base::AtomicValue<void*> highest_ever_allocated_;
1387
1388 base::VirtualMemory last_chunk_;
1389 Unmapper unmapper_;
1390
1391 friend class TestCodeRangeScope;
1392
1393 DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
1394 };
1395
1396 extern template Page*
1397 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
1398 size_t size, PagedSpace* owner, Executability executable);
1399 extern template Page*
1400 MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
1401 size_t size, SemiSpace* owner, Executability executable);
1402 extern template Page*
1403 MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
1404 size_t size, SemiSpace* owner, Executability executable);
1405
1406 // -----------------------------------------------------------------------------
1407 // Interface for heap object iterator to be implemented by all object space
1408 // object iterators.
1409 //
1410 // NOTE: The space-specific object iterators also implement their own next()
1411 // method, which is used to avoid virtual function calls when
1412 // iterating a specific space.
1413
1414 class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
1415 public:
1416 virtual ~ObjectIterator() {}
1417 virtual HeapObject* Next() = 0;
1418 };
1419
1420 template <class PAGE_TYPE>
1421 class PageIteratorImpl
1422 : public std::iterator<std::forward_iterator_tag, PAGE_TYPE> {
1423 public:
1424 explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
1425 PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
1426 PAGE_TYPE* operator*() { return p_; }
1427 bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1428 return rhs.p_ == p_;
1429 }
1430 bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
1431 return rhs.p_ != p_;
1432 }
1433 inline PageIteratorImpl<PAGE_TYPE>& operator++();
1434 inline PageIteratorImpl<PAGE_TYPE> operator++(int);
1435
1436 private:
1437 PAGE_TYPE* p_;
1438 };
1439
1440 typedef PageIteratorImpl<Page> PageIterator;
1441 typedef PageIteratorImpl<LargePage> LargePageIterator;
1442
1443 class PageRange {
1444 public:
1445 typedef PageIterator iterator;
1446 PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
1447 explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
1448 inline PageRange(Address start, Address limit);
1449
1450 iterator begin() { return iterator(begin_); }
1451 iterator end() { return iterator(end_); }
1452
1453 private:
1454 Page* begin_;
1455 Page* end_;
1456 };
1457
1458 // -----------------------------------------------------------------------------
1459 // Heap object iterator in new/old/map spaces.
1460 //
1461 // A HeapObjectIterator iterates objects from the bottom of the given space
1462 // to its top or from the bottom of the given page to its top.
1463 //
1464 // If objects are allocated in the page during iteration the iterator may
1465 // or may not iterate over those objects. The caller must create a new
1466 // iterator in order to be sure to visit these new objects.
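// Typical (illustrative) loop, assuming |space| is a PagedSpace*:
//   HeapObjectIterator it(space);
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     ...
//   }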
1467 class V8_EXPORT_PRIVATE HeapObjectIterator : public ObjectIterator {
1468 public:
1469 // Creates a new object iterator in a given space.
1470 explicit HeapObjectIterator(PagedSpace* space);
1471 explicit HeapObjectIterator(Page* page);
1472
1473 // Advance to the next object, skipping free spaces and other fillers and
1474 // skipping the special garbage section of which there is one per space.
1475 // Returns nullptr when the iteration has ended.
1476 inline HeapObject* Next() override;
1477
1478 private:
1479 // Fast (inlined) path of next().
1480 inline HeapObject* FromCurrentPage();
1481
1482 // Slow path of next(), goes into the next page. Returns false if the
1483 // iteration has ended.
1484 bool AdvanceToNextPage();
1485
1486 Address cur_addr_; // Current iteration point.
1487 Address cur_end_; // End iteration point.
1488 PagedSpace* space_;
1489 PageRange page_range_;
1490 PageRange::iterator current_page_;
1491 };
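
// Illustrative sketch: walking all objects of a paged space with the
// iterator declared above. Fillers and the per-space garbage section are
// skipped by Next() itself.
//
//   HeapObjectIterator it(old_space);  // |old_space| is a PagedSpace*.
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // ... inspect obj ...
//   }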
1492
1493
1494 // -----------------------------------------------------------------------------
1495 // A space has a circular list of pages. The next page can be accessed via
1496 // Page::next_page() call.
1497
1498 // An abstraction of allocation and relocation pointers in a page-structured
1499 // space.
1500 class AllocationInfo {
1501 public:
1502 AllocationInfo() : original_top_(nullptr), top_(nullptr), limit_(nullptr) {}
1503 AllocationInfo(Address top, Address limit)
1504 : original_top_(top), top_(top), limit_(limit) {}
1505 
1506 void Reset(Address top, Address limit) {
1507 original_top_ = top;
1508 set_top(top);
1509 set_limit(limit);
1510 }
1511
1512 Address original_top() {
1513 SLOW_DCHECK(top_ == NULL ||
1514 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1515 return original_top_;
1516 }
1517
1518 INLINE(void set_top(Address top)) {
1519 SLOW_DCHECK(top == NULL ||
1520 (reinterpret_cast<intptr_t>(top) & kHeapObjectTagMask) == 0);
1521 top_ = top;
1522 }
1523
1524 INLINE(Address top()) const {
1525 SLOW_DCHECK(top_ == NULL ||
1526 (reinterpret_cast<intptr_t>(top_) & kHeapObjectTagMask) == 0);
1527 return top_;
1528 }
1529
1530 Address* top_address() { return &top_; }
1531 
1532 INLINE(void set_limit(Address limit)) {
1533 limit_ = limit;
1534 }
1535
1536 INLINE(Address limit()) const {
1537 return limit_;
1538 }
1539
1540 Address* limit_address() { return &limit_; }
1541
1542 #ifdef DEBUG
1543 bool VerifyPagedAllocation() {
1544 return (Page::FromAllocationAreaAddress(top_) ==
1545 Page::FromAllocationAreaAddress(limit_)) &&
1546 (top_ <= limit_);
1547 }
1548 #endif
1549
1550 private:
1551 // The original top address when the allocation info was initialized.
1552 Address original_top_;
1553 // Current allocation top.
1554 Address top_;
1555 // Current allocation limit.
1556 Address limit_;
1557 };
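
// Illustrative sketch of the bump-pointer protocol that AllocationInfo
// models (simplified, without alignment or observer handling): a request of
// |size| bytes succeeds as long as top() + size does not exceed limit(), and
// advances the top pointer accordingly.
//
//   AllocationInfo info(area_start, area_end);
//   Address result = info.top();
//   if (result + size <= info.limit()) {
//     info.set_top(result + size);
//     // Bytes [result, result + size) now belong to the new object.
//   }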
1558
1559
1560 // An abstraction of the accounting statistics of a page-structured space.
1561 //
1562 // The stats are only set by functions that ensure they stay balanced. These
1563 // functions increase or decrease one of the non-capacity stats in conjunction
1564 // with capacity, or else they always balance increases and decreases to the
1565 // non-capacity stats.
1566 class AllocationStats BASE_EMBEDDED {
1567 public:
1568 AllocationStats() { Clear(); }
1569
1570 // Zero out all the allocation statistics (i.e., no capacity).
1571 void Clear() {
1572 capacity_ = 0;
1573 max_capacity_ = 0;
1574 size_ = 0;
1575 }
1576
1577 void ClearSize() { size_ = capacity_; }
1578
1579 // Accessors for the allocation statistics.
1580 size_t Capacity() { return capacity_; }
1581 size_t MaxCapacity() { return max_capacity_; }
1582 size_t Size() { return size_; }
1583
1584 // Grow the space by adding available bytes. They are initially marked as
1585 // being in use (part of the size), but will normally be immediately freed,
1586 // putting them on the free list and removing them from size_.
1587 void ExpandSpace(size_t bytes) {
1588 DCHECK_GE(size_ + bytes, size_);
1589 DCHECK_GE(capacity_ + bytes, capacity_);
1590 capacity_ += bytes;
1591 size_ += bytes;
1592 if (capacity_ > max_capacity_) {
1593 max_capacity_ = capacity_;
1594 }
1595 }
1596
1597 // Shrink the space by removing available bytes. Since shrinking is done
1598 // during sweeping, bytes have been marked as being in use (part of the size)
1599 // and are hereby freed.
1600 void ShrinkSpace(size_t bytes) {
1601 DCHECK_GE(capacity_, bytes);
1602 DCHECK_GE(size_, bytes);
1603 capacity_ -= bytes;
1604 size_ -= bytes;
1605 }
1606
1607 void AllocateBytes(size_t bytes) {
1608 DCHECK_GE(size_ + bytes, size_);
1609 size_ += bytes;
1610 }
1611
1612 void DeallocateBytes(size_t bytes) {
1613 DCHECK_GE(size_, bytes);
1614 size_ -= bytes;
1615 }
1616
1617 void DecreaseCapacity(size_t bytes) {
1618 DCHECK_GE(capacity_, bytes);
1619 DCHECK_GE(capacity_ - bytes, size_);
1620 capacity_ -= bytes;
1621 }
1622
1623 void IncreaseCapacity(size_t bytes) {
1624 DCHECK_GE(capacity_ + bytes, capacity_);
1625 capacity_ += bytes;
1626 }
1627
1628 // Merge |other| into |this|.
1629 void Merge(const AllocationStats& other) {
1630 DCHECK_GE(capacity_ + other.capacity_, capacity_);
1631 DCHECK_GE(size_ + other.size_, size_);
1632 capacity_ += other.capacity_;
1633 size_ += other.size_;
1634 if (other.max_capacity_ > max_capacity_) {
1635 max_capacity_ = other.max_capacity_;
1636 }
1637 }
1638
1639 private:
1640 // |capacity_|: The number of object-area bytes (i.e., not including page
1641 // bookkeeping structures) currently in the space.
1642 size_t capacity_;
1643
1644 // |max_capacity_|: The maximum capacity ever observed.
1645 size_t max_capacity_;
1646
1647 // |size_|: The number of allocated bytes.
1648 size_t size_;
1649 };
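
// Illustrative sketch of the balancing contract described above: growing the
// space first accounts the new bytes as in use, and freeing them afterwards
// only reduces |size_| while |capacity_| stays put.
//
//   AllocationStats stats;
//   stats.ExpandSpace(Page::kAllocatableMemory);      // capacity == size
//   stats.DeallocateBytes(Page::kAllocatableMemory);  // size drops back to 0
//   // stats.Capacity() still reports Page::kAllocatableMemory.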
1650
1651 // A free list maintaining free blocks of memory. The free list is organized in
1652 // a way to encourage objects allocated around the same time to be near each
1653 // other. The normal way to allocate is intended to be by bumping a 'top'
1654 // pointer until it hits a 'limit' pointer. When the limit is hit we need to
1655 // find a new space to allocate from. This is done with the free list, which is
1656 // divided up into rough categories to cut down on waste. Having finer
1657 // categories would scatter allocation more.
1658
1659 // The free list is organized in categories as follows:
1660 // kMinBlockSize-10 words (tiniest): The tiniest blocks are only used for
1661 // allocation when the categories >= small have no entries anymore.
1662 // 11-31 words (tiny): The tiny blocks are only used for allocation when the
1663 // categories >= small have no entries anymore.
1664 // 32-255 words (small): Used for allocating free space between 1-31 words in
1665 // size.
1666 // 256-2047 words (medium): Used for allocating free space between 32-255 words
1667 // in size.
1668 // 2048-16383 words (large): Used for allocating free space between 256-2047
1669 // words in size.
1670 // At least 16384 words (huge): This list is for objects of 2048 words or
1671 // larger. Empty pages are also added to this list.
1672 class V8_EXPORT_PRIVATE FreeList {
1673 public:
1674 // This method returns how much memory can be allocated after freeing
1675 // maximum_freed memory.
1676 static inline size_t GuaranteedAllocatable(size_t maximum_freed) {
1677 if (maximum_freed <= kTiniestListMax) {
1678 // Since we are not iterating over all list entries, we cannot guarantee
1679 // that we can find the maximum freed block in that free list.
1680 return 0;
1681 } else if (maximum_freed <= kTinyListMax) {
1682 return kTinyAllocationMax;
1683 } else if (maximum_freed <= kSmallListMax) {
1684 return kSmallAllocationMax;
1685 } else if (maximum_freed <= kMediumListMax) {
1686 return kMediumAllocationMax;
1687 } else if (maximum_freed <= kLargeListMax) {
1688 return kLargeAllocationMax;
1689 }
1690 return maximum_freed;
1691 }
1692
1693 explicit FreeList(PagedSpace* owner);
1694
1695 // Adds a node on the free list. The block of size {size_in_bytes} starting
1696 // at {start} is placed on the free list. The return value is the number of
1697 // bytes that were not added to the free list because the freed memory block
1698 // was too small. Bookkeeping information will be written to the block, i.e.,
1699 // its contents will be destroyed. The start address should be word aligned,
1700 // and the size should be a non-zero multiple of the word size.
1701 size_t Free(Address start, size_t size_in_bytes, FreeMode mode);
1702
1703 // Allocate a block of size {size_in_bytes} from the free list. The block is
1704 // uninitialized. A failure is returned if no block is available. The size
1705 // should be a non-zero multiple of the word size.
1706 MUST_USE_RESULT HeapObject* Allocate(size_t size_in_bytes);
1707
1708 // Clear the free list.
1709 void Reset();
1710
1711 void ResetStats() {
1712 wasted_bytes_.SetValue(0);
1713 ForAllFreeListCategories(
1714 [](FreeListCategory* category) { category->ResetStats(); });
1715 }
1716
1717 // Return the number of bytes available on the free list.
1718 size_t Available() {
1719 size_t available = 0;
1720 ForAllFreeListCategories([&available](FreeListCategory* category) {
1721 available += category->available();
1722 });
1723 return available;
1724 }
1725
1726 bool IsEmpty() {
1727 bool empty = true;
1728 ForAllFreeListCategories([&empty](FreeListCategory* category) {
1729 if (!category->is_empty()) empty = false;
1730 });
1731 return empty;
1732 }
1733
1734 // Used after booting the VM.
1735 void RepairLists(Heap* heap);
1736
1737 size_t EvictFreeListItems(Page* page);
1738 bool ContainsPageFreeListItems(Page* page);
1739
1740 PagedSpace* owner() { return owner_; }
1741 size_t wasted_bytes() { return wasted_bytes_.Value(); }
1742
1743 template <typename Callback>
1744 void ForAllFreeListCategories(FreeListCategoryType type, Callback callback) {
1745 FreeListCategory* current = categories_[type];
1746 while (current != nullptr) {
1747 FreeListCategory* next = current->next();
1748 callback(current);
1749 current = next;
1750 }
1751 }
1752
1753 template <typename Callback>
1754 void ForAllFreeListCategories(Callback callback) {
1755 for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
1756 ForAllFreeListCategories(static_cast<FreeListCategoryType>(i), callback);
1757 }
1758 }
1759
1760 bool AddCategory(FreeListCategory* category);
1761 void RemoveCategory(FreeListCategory* category);
1762 void PrintCategories(FreeListCategoryType type);
1763
1764 #ifdef DEBUG
1765 size_t SumFreeLists();
1766 bool IsVeryLong();
1767 #endif
1768
1769 private:
1770 class FreeListCategoryIterator {
1771 public:
1772 FreeListCategoryIterator(FreeList* free_list, FreeListCategoryType type)
1773 : current_(free_list->categories_[type]) {}
1774
1775 bool HasNext() { return current_ != nullptr; }
1776 
1777 FreeListCategory* Next() {
1778 DCHECK(HasNext());
1779 FreeListCategory* tmp = current_;
1780 current_ = current_->next();
1781 return tmp;
1782 }
1783
1784 private:
1785 FreeListCategory* current_;
1786 };
1787
1788 // The size range of blocks, in bytes.
1789 static const size_t kMinBlockSize = 3 * kPointerSize;
1790 static const size_t kMaxBlockSize = Page::kAllocatableMemory;
1791
1792 static const size_t kTiniestListMax = 0xa * kPointerSize;
1793 static const size_t kTinyListMax = 0x1f * kPointerSize;
1794 static const size_t kSmallListMax = 0xff * kPointerSize;
1795 static const size_t kMediumListMax = 0x7ff * kPointerSize;
1796 static const size_t kLargeListMax = 0x3fff * kPointerSize;
1797 static const size_t kTinyAllocationMax = kTiniestListMax;
1798 static const size_t kSmallAllocationMax = kTinyListMax;
1799 static const size_t kMediumAllocationMax = kSmallListMax;
1800 static const size_t kLargeAllocationMax = kMediumListMax;
1801
1802 FreeSpace* FindNodeFor(size_t size_in_bytes, size_t* node_size);
1803
1804 // Walks all available categories for a given |type| and tries to retrieve
1805 // a node. Returns nullptr if the category is empty.
1806 FreeSpace* FindNodeIn(FreeListCategoryType type, size_t* node_size);
1807
1808 // Tries to retrieve a node from the first category in a given |type|.
1809 // Returns nullptr if the category is empty.
1810 FreeSpace* TryFindNodeIn(FreeListCategoryType type, size_t* node_size,
1811 size_t minimum_size);
1812
1813 // Searches a given |type| for a node of at least |minimum_size|.
1814 FreeSpace* SearchForNodeInList(FreeListCategoryType type, size_t* node_size,
1815 size_t minimum_size);
1816
1817 FreeListCategoryType SelectFreeListCategoryType(size_t size_in_bytes) {
1818 if (size_in_bytes <= kTiniestListMax) {
1819 return kTiniest;
1820 } else if (size_in_bytes <= kTinyListMax) {
1821 return kTiny;
1822 } else if (size_in_bytes <= kSmallListMax) {
1823 return kSmall;
1824 } else if (size_in_bytes <= kMediumListMax) {
1825 return kMedium;
1826 } else if (size_in_bytes <= kLargeListMax) {
1827 return kLarge;
1828 }
1829 return kHuge;
1830 }
1831
1832 // The tiny categories are not used for fast allocation.
1833 FreeListCategoryType SelectFastAllocationFreeListCategoryType(
1834 size_t size_in_bytes) {
1835 if (size_in_bytes <= kSmallAllocationMax) {
1836 return kSmall;
1837 } else if (size_in_bytes <= kMediumAllocationMax) {
1838 return kMedium;
1839 } else if (size_in_bytes <= kLargeAllocationMax) {
1840 return kLarge;
1841 }
1842 return kHuge;
1843 }
1844
1845 FreeListCategory* top(FreeListCategoryType type) { return categories_[type]; }
1846
1847 PagedSpace* owner_;
1848 base::AtomicNumber<size_t> wasted_bytes_;
1849 FreeListCategory* categories_[kNumberOfCategories];
1850
1851 friend class FreeListCategory;
1852
1853 DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
1854 };
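
// Illustrative sketch of the category mapping above (sizes are in words of
// kPointerSize bytes): a freed 100-word block lands in the small list
// (32-255 words), and GuaranteedAllocatable() promises only what can be
// served without scanning that list exhaustively.
//
//   FreeList* free_list = space->free_list();
//   free_list->Free(start, 100 * kPointerSize, kLinkCategory);
//   size_t guaranteed =
//       FreeList::GuaranteedAllocatable(100 * kPointerSize);  // == kSmallAllocationMax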
1855
1856 // LocalAllocationBuffer represents a linear allocation area that is created
1857 // from a given {AllocationResult} and can be used to allocate memory without
1858 // synchronization.
1859 //
1860 // The buffer is properly closed upon destruction and reassignment.
1861 // Example:
1862 // {
1863 // AllocationResult result = ...;
1864 // LocalAllocationBuffer a(heap, result, size);
1865 // LocalAllocationBuffer b = a;
1866 // CHECK(!a.IsValid());
1867 // CHECK(b.IsValid());
1868 // // {a} is invalid now and cannot be used for further allocations.
1869 // }
1870 // // Since {b} went out of scope, the LAB is closed, resulting in creating a
1871 // // filler object for the remaining area.
1872 class LocalAllocationBuffer {
1873 public:
1874 // Indicates that a buffer cannot be used for allocations anymore. Can result
1875 // from either reassigning a buffer, or trying to construct it from an
1876 // invalid {AllocationResult}.
1877 static inline LocalAllocationBuffer InvalidBuffer();
1878
1879 // Creates a new LAB from a given {AllocationResult}. Results in
1880 // InvalidBuffer if the result indicates a retry.
1881 static inline LocalAllocationBuffer FromResult(Heap* heap,
1882 AllocationResult result,
1883 intptr_t size);
1884
1885 ~LocalAllocationBuffer() { Close(); }
1886
1887 // Convert to C++11 move-semantics once allowed by the style guide.
1888 LocalAllocationBuffer(const LocalAllocationBuffer& other);
1889 LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other);
1890
1891 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
1892 int size_in_bytes, AllocationAlignment alignment);
1893
1894 inline bool IsValid() { return allocation_info_.top() != nullptr; }
1895
1896 // Try to merge LABs, which is only possible when they are adjacent in memory.
1897 // Returns true if the merge was successful, false otherwise.
1898 inline bool TryMerge(LocalAllocationBuffer* other);
1899
1900 private:
1901 LocalAllocationBuffer(Heap* heap, AllocationInfo allocation_info);
1902
1903 void Close();
1904
1905 Heap* heap_;
1906 AllocationInfo allocation_info_;
1907 };
1908
1909 class V8_EXPORT_PRIVATE PagedSpace : NON_EXPORTED_BASE(public Space) {
1910 public:
1911 typedef PageIterator iterator;
1912
1913 static const intptr_t kCompactionMemoryWanted = 500 * KB;
1914
1915 // Creates a space with an id.
1916 PagedSpace(Heap* heap, AllocationSpace id, Executability executable);
1917
1918 ~PagedSpace() override { TearDown(); }
1919
1920 // Set up the space using the given address range of virtual memory (from
1921 // the memory allocator's initial chunk) if possible. If the block of
1922 // addresses is not big enough to contain a single page-aligned page, a
1923 // fresh chunk will be allocated.
1924 bool SetUp();
1925
1926 // Returns true if the space has been successfully set up and not
1927 // subsequently torn down.
1928 bool HasBeenSetUp();
1929
1930 // Checks whether an object/address is in this space.
1931 inline bool Contains(Address a);
1932 inline bool Contains(Object* o);
1933 bool ContainsSlow(Address addr);
1934
1935 // During boot the free_space_map is created, and afterwards we may need
1936 // to write it into the free list nodes that were already created.
1937 void RepairFreeListsAfterDeserialization();
1938
1939 // Prepares for a mark-compact GC.
1940 void PrepareForMarkCompact();
1941
1942 // Current capacity without growing (Size() + Available()).
1943 size_t Capacity() { return accounting_stats_.Capacity(); }
1944
1945 // Approximate amount of physical memory committed for this space.
1946 size_t CommittedPhysicalMemory() override;
1947
1948 void ResetFreeListStatistics();
1949
1950 // Sets the capacity, the available space and the wasted space to zero.
1951 // The stats are rebuilt during sweeping by adding each page to the
1952 // capacity and the size when it is encountered. As free spaces are
1953 // discovered during the sweeping they are subtracted from the size and added
1954 // to the available and wasted totals.
1955 void ClearStats() {
1956 accounting_stats_.ClearSize();
1957 free_list_.ResetStats();
1958 ResetFreeListStatistics();
1959 }
1960
1961 // Available bytes without growing. These are the bytes on the free list.
1962 // The bytes in the linear allocation area are not included in this total
1963 // because updating the stats would slow down allocation. New pages are
1964 // immediately added to the free list so they show up here.
1965 size_t Available() override { return free_list_.Available(); }
1966
1967 // Allocated bytes in this space. Garbage bytes that were not found due to
1968 // concurrent sweeping are counted as being allocated! The bytes in the
1969 // current linear allocation area (between top and limit) are also counted
1970 // here.
1971 size_t Size() override { return accounting_stats_.Size(); }
1972
1973 // As size, but the bytes in lazily swept pages are estimated and the bytes
1974 // in the current linear allocation area are not included.
1975 size_t SizeOfObjects() override;
1976
1977 // Wasted bytes in this space. These are just the bytes that were thrown away
1978 // due to being too small to use for allocation.
1979 virtual size_t Waste() { return free_list_.wasted_bytes(); }
1980
1981 // Returns the allocation pointer in this space.
1982 Address top() { return allocation_info_.top(); }
1983 Address limit() { return allocation_info_.limit(); }
1984
1985 // The allocation top address.
1986 Address* allocation_top_address() { return allocation_info_.top_address(); }
1987
1988 // The allocation limit address.
1989 Address* allocation_limit_address() {
1990 return allocation_info_.limit_address();
1991 }
1992
1993 enum UpdateSkipList { UPDATE_SKIP_LIST, IGNORE_SKIP_LIST };
1994
1995 // Allocate the requested number of bytes in the space if possible, return a
1996 // failure object if not. Only use IGNORE_SKIP_LIST if the skip list is going
1997 // to be manually updated later.
1998 MUST_USE_RESULT inline AllocationResult AllocateRawUnaligned(
1999 int size_in_bytes, UpdateSkipList update_skip_list = UPDATE_SKIP_LIST);
2000
2001 MUST_USE_RESULT inline AllocationResult AllocateRawUnalignedSynchronized(
2002 int size_in_bytes);
2003
2004 // Allocate the requested number of bytes in the space double aligned if
2005 // possible, return a failure object if not.
2006 MUST_USE_RESULT inline AllocationResult AllocateRawAligned(
2007 int size_in_bytes, AllocationAlignment alignment);
2008
2009 // Allocate the requested number of bytes in the space and consider allocation
2010 // alignment if needed.
2011 MUST_USE_RESULT inline AllocationResult AllocateRaw(
2012 int size_in_bytes, AllocationAlignment alignment);
2013
2014 // Give a block of memory to the space's free list. It might be added to
2015 // the free list or accounted as waste.
2016 // If add_to_freelist is false then just accounting stats are updated and
2017 // no attempt to add area to free list is made.
2018 size_t Free(Address start, size_t size_in_bytes) {
2019 size_t wasted = free_list_.Free(start, size_in_bytes, kLinkCategory);
2020 accounting_stats_.DeallocateBytes(size_in_bytes);
2021 DCHECK_GE(size_in_bytes, wasted);
2022 return size_in_bytes - wasted;
2023 }
2024
2025 size_t UnaccountedFree(Address start, size_t size_in_bytes) {
2026 size_t wasted = free_list_.Free(start, size_in_bytes, kDoNotLinkCategory);
2027 DCHECK_GE(size_in_bytes, wasted);
2028 return size_in_bytes - wasted;
2029 }
2030
2031 void ResetFreeList() { free_list_.Reset(); }
2032
2033 // Set space allocation info.
2034 void SetTopAndLimit(Address top, Address limit) {
2035 DCHECK(top == limit ||
2036 Page::FromAddress(top) == Page::FromAddress(limit - 1));
2037 MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
2038 allocation_info_.Reset(top, limit);
2039 }
2040
2041 void SetAllocationInfo(Address top, Address limit);
2042
2043 // Empty space allocation info, returning unused area to free list.
2044 void EmptyAllocationInfo();
2045
2046 void MarkAllocationInfoBlack();
2047
2048 void AccountAllocatedBytes(size_t bytes) {
2049 accounting_stats_.AllocateBytes(bytes);
2050 }
2051
2052 void IncreaseCapacity(size_t bytes);
2053
2054 // Releases an unused page and shrinks the space.
2055 void ReleasePage(Page* page);
2056
2057 // The dummy page that anchors the linked list of pages.
2058 Page* anchor() { return &anchor_; }
2059
2060
2061 #ifdef VERIFY_HEAP
2062 // Verify integrity of this space.
2063 virtual void Verify(ObjectVisitor* visitor);
2064
2065 // Overridden by subclasses to verify space-specific object
2066 // properties (e.g., only maps or free-list nodes are in map space).
2067 virtual void VerifyObject(HeapObject* obj) {}
2068 #endif
2069
2070 #ifdef DEBUG
2071 // Print meta info and objects in this space.
2072 void Print() override;
2073
2074 // Reports statistics for the space
2075 void ReportStatistics();
2076
2077 // Report code object related statistics
2078 static void ReportCodeStatistics(Isolate* isolate);
2079 static void ResetCodeStatistics(Isolate* isolate);
2080 #endif
2081
2082 Page* FirstPage() { return anchor_.next_page(); }
2083 Page* LastPage() { return anchor_.prev_page(); }
2084
2085 bool CanExpand(size_t size);
2086
2087 // Returns the number of total pages in this space.
2088 int CountTotalPages();
2089
2090 // Return size of allocatable area on a page in this space.
2091 inline int AreaSize() { return static_cast<int>(area_size_); }
2092
2093 virtual bool is_local() { return false; }
2094
2095 // Merges {other} into the current space. Note that this modifies {other},
2096 // e.g., removes its bump pointer area and resets statistics.
2097 void MergeCompactionSpace(CompactionSpace* other);
2098
2099 // Refills the free list from the corresponding free list filled by the
2100 // sweeper.
2101 virtual void RefillFreeList();
2102
2103 FreeList* free_list() { return &free_list_; }
2104
2105 base::Mutex* mutex() { return &space_mutex_; }
2106
2107 inline void UnlinkFreeListCategories(Page* page);
2108 inline intptr_t RelinkFreeListCategories(Page* page);
2109
2110 iterator begin() { return iterator(anchor_.next_page()); }
2111 iterator end() { return iterator(&anchor_); }
2112
2113 // Shrink immortal immovable pages of the space to be exactly the size needed
2114 // using the high water mark.
2115 void ShrinkImmortalImmovablePages();
2116
2117 std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2118
2119 protected:
2120 // PagedSpaces that should be included in snapshots have different, i.e.,
2121 // smaller, initial pages.
2122 virtual bool snapshotable() { return true; }
2123
2124 bool HasPages() { return anchor_.next_page() != &anchor_; }
2125
2126 // Cleans up the space, frees all pages in this space except those belonging
2127 // to the initial chunk, uncommits addresses in the initial chunk.
2128 void TearDown();
2129
2130 // Expands the space by allocating a fixed number of pages. Returns false if
2131 // it cannot allocate the requested number of pages from the OS, or if the
2132 // hard heap size limit has been hit.
2133 bool Expand();
2134
2135 // Generic fast case allocation function that tries linear allocation at the
2136 // address denoted by top in allocation_info_.
2137 inline HeapObject* AllocateLinearly(int size_in_bytes);
2138
2139 // Generic fast case allocation function that tries aligned linear allocation
2140 // at the address denoted by top in allocation_info_. Writes the aligned
2141 // allocation size, which includes the filler size, to size_in_bytes.
2142 inline HeapObject* AllocateLinearlyAligned(int* size_in_bytes,
2143 AllocationAlignment alignment);
2144
2145 // If sweeping is still in progress try to sweep unswept pages. If that is
2146 // not successful, wait for the sweeper threads and re-try free-list
2147 // allocation.
2148 MUST_USE_RESULT virtual HeapObject* SweepAndRetryAllocation(
2149 int size_in_bytes);
2150
2151 // Slow path of AllocateRaw. This function is space-dependent.
2152 MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
2153
2154 size_t area_size_;
2155
2156 // Accounting information for this space.
2157 AllocationStats accounting_stats_;
2158
2159 // The dummy page that anchors the double linked list of pages.
2160 Page anchor_;
2161
2162 // The space's free list.
2163 FreeList free_list_;
2164
2165 // Normal allocation information.
2166 AllocationInfo allocation_info_;
2167
2168 // Mutex guarding any concurrent access to the space.
2169 base::Mutex space_mutex_;
2170
2171 friend class IncrementalMarking;
2172 friend class MarkCompactCollector;
2173
2174 // Used in cctest.
2175 friend class HeapTester;
2176 };
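
// Illustrative sketch of the PagedSpace fast path (AllocationResult::To() is
// assumed from heap.h and not declared in this header): allocation bumps the
// linear area between top() and limit(), and unused memory handed back via
// Free() either becomes a free-list node or is accounted as waste.
//
//   AllocationResult result = old_space->AllocateRawUnaligned(size_in_bytes);
//   HeapObject* obj = nullptr;
//   if (result.To(&obj)) {
//     // ... initialize the object at obj ...
//   }
//   // Later, returning an unused block to the space:
//   size_t reclaimed = old_space->Free(block_start, block_size);  // <= block_size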
2177
2178 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
2179
2180 // -----------------------------------------------------------------------------
2181 // SemiSpace in young generation
2182 //
2183 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
2184 // The mark-compact collector uses the memory of the first page in the from
2185 // space as a marking stack when tracing live objects.
2186 class SemiSpace : public Space {
2187 public:
2188 typedef PageIterator iterator;
2189
2190 static void Swap(SemiSpace* from, SemiSpace* to);
2191
2192 SemiSpace(Heap* heap, SemiSpaceId semispace)
2193 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2194 current_capacity_(0),
2195 maximum_capacity_(0),
2196 minimum_capacity_(0),
2197 age_mark_(nullptr),
2198 committed_(false),
2199 id_(semispace),
2200 anchor_(this),
2201 current_page_(nullptr),
2202 pages_used_(0) {}
2203
2204 inline bool Contains(HeapObject* o);
2205 inline bool Contains(Object* o);
2206 inline bool ContainsSlow(Address a);
2207
2208 void SetUp(size_t initial_capacity, size_t maximum_capacity);
2209 void TearDown();
2210 bool HasBeenSetUp() { return maximum_capacity_ != 0; }
2211
2212 bool Commit();
2213 bool Uncommit();
2214 bool is_committed() { return committed_; }
2215
2216 // Grow the semispace to the new capacity. The new capacity requested must
2217 // be larger than the current capacity and less than the maximum capacity.
2218 bool GrowTo(size_t new_capacity);
2219
2220 // Shrinks the semispace to the new capacity. The new capacity requested
2221 // must be more than the amount of used memory in the semispace and less
2222 // than the current capacity.
2223 bool ShrinkTo(size_t new_capacity);
2224
2225 bool EnsureCurrentCapacity();
2226
2227 // Returns the start address of the first page of the space.
2228 Address space_start() {
2229 DCHECK_NE(anchor_.next_page(), anchor());
2230 return anchor_.next_page()->area_start();
2231 }
2232
2233 Page* first_page() { return anchor_.next_page(); }
2234 Page* current_page() { return current_page_; }
2235 int pages_used() { return pages_used_; }
2236
2237 // Returns one past the end address of the space.
2238 Address space_end() { return anchor_.prev_page()->area_end(); }
2239
2240 // Returns the start address of the current page of the space.
2241 Address page_low() { return current_page_->area_start(); }
2242
2243 // Returns one past the end address of the current page of the space.
2244 Address page_high() { return current_page_->area_end(); }
2245
2246 bool AdvancePage() {
2247 Page* next_page = current_page_->next_page();
2248 // We cannot expand if we reached the maximum number of pages already. Note
2249 // that we need to account for the next page already for this check as we
2250 // could potentially fill the whole page after advancing.
2251 const bool reached_max_pages = (pages_used_ + 1) == max_pages();
2252 if (next_page == anchor() || reached_max_pages) {
2253 return false;
2254 }
2255 current_page_ = next_page;
2256 pages_used_++;
2257 return true;
2258 }
2259
2260 // Resets the space to using the first page.
2261 void Reset();
2262
2263 void RemovePage(Page* page);
2264 void PrependPage(Page* page);
2265
2266 // Age mark accessors.
2267 Address age_mark() { return age_mark_; }
2268 void set_age_mark(Address mark);
2269
2270 // Returns the current capacity of the semispace.
2271 size_t current_capacity() { return current_capacity_; }
2272
2273 // Returns the maximum capacity of the semispace.
2274 size_t maximum_capacity() { return maximum_capacity_; }
2275
2276 // Returns the initial capacity of the semispace.
2277 size_t minimum_capacity() { return minimum_capacity_; }
2278
2279 SemiSpaceId id() { return id_; }
2280
2281 // Approximate amount of physical memory committed for this space.
2282 size_t CommittedPhysicalMemory() override;
2283
2284 // If we don't have these here then SemiSpace will be abstract. However
2285 // they should never be called:
2286
2287 size_t Size() override {
2288 UNREACHABLE();
2289 return 0;
2290 }
2291
2292 size_t SizeOfObjects() override { return Size(); }
2293
2294 size_t Available() override {
2295 UNREACHABLE();
2296 return 0;
2297 }
2298
2299 iterator begin() { return iterator(anchor_.next_page()); }
2300 iterator end() { return iterator(anchor()); }
2301
2302 std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2303
2304 #ifdef DEBUG
2305 void Print() override;
2306 // Validate a range of addresses in a SemiSpace.
2307 // The "from" address must be on a page prior to the "to" address,
2308 // in the linked page order, or it must be earlier on the same page.
2309 static void AssertValidRange(Address from, Address to);
2310 #else
2311 // Do nothing.
2312 inline static void AssertValidRange(Address from, Address to) {}
2313 #endif
2314
2315 #ifdef VERIFY_HEAP
2316 virtual void Verify();
2317 #endif
2318
2319 private:
2320 void RewindPages(Page* start, int num_pages);
2321
2322 inline Page* anchor() { return &anchor_; }
2323 inline int max_pages() {
2324 return static_cast<int>(current_capacity_ / Page::kPageSize);
2325 }
2326
2327 // Copies the flags into the masked positions on all pages in the space.
2328 void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
2329
2330 // The currently committed space capacity.
2331 size_t current_capacity_;
2332
2333 // The maximum capacity that can be used by this space. A space cannot grow
2334 // beyond that size.
2335 size_t maximum_capacity_;
2336
2337 // The minimum capacity for the space. A space cannot shrink below this size.
2338 size_t minimum_capacity_;
2339
2340 // Used to govern object promotion during mark-compact collection.
2341 Address age_mark_;
2342
2343 bool committed_;
2344 SemiSpaceId id_;
2345
2346 Page anchor_;
2347 Page* current_page_;
2348 int pages_used_;
2349
2350 friend class NewSpace;
2351 friend class SemiSpaceIterator;
2352 };
2353
2354
2355 // A SemiSpaceIterator is an ObjectIterator that iterates over the active
2356 // semispace of the heap's new space. It iterates over the objects in the
2357 // semispace from a given start address (defaulting to the bottom of the
2358 // semispace) to the top of the semispace. New objects allocated after the
2359 // iterator is created are not iterated.
2360 class SemiSpaceIterator : public ObjectIterator {
2361 public:
2362 // Create an iterator over the allocated objects in the given to-space.
2363 explicit SemiSpaceIterator(NewSpace* space);
2364
2365 inline HeapObject* Next() override;
2366
2367 private:
2368 void Initialize(Address start, Address end);
2369
2370 // The current iteration point.
2371 Address current_;
2372 // The end of iteration.
2373 Address limit_;
2374 };
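
// Illustrative sketch (assuming the usual Heap::new_space() accessor):
//
//   SemiSpaceIterator it(heap->new_space());
//   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
//     // Only objects allocated before the iterator was created are visited.
//   }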
2375
2376 // -----------------------------------------------------------------------------
2377 // The young generation space.
2378 //
2379 // The new space consists of a contiguous pair of semispaces. It simply
2380 // forwards most functions to the appropriate semispace.
2381
2382 class NewSpace : public Space {
2383 public:
2384 typedef PageIterator iterator;
2385
2386 explicit NewSpace(Heap* heap)
2387 : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
2388 to_space_(heap, kToSpace),
2389 from_space_(heap, kFromSpace),
2390 reservation_(),
2391 top_on_previous_step_(0),
2392 allocated_histogram_(nullptr),
2393 promoted_histogram_(nullptr) {}
2394
2395 inline bool Contains(HeapObject* o);
2396 inline bool ContainsSlow(Address a);
2397 inline bool Contains(Object* o);
2398
2399 bool SetUp(size_t initial_semispace_capacity, size_t max_semispace_capacity);
2400
2401 // Tears down the space. Heap memory was not allocated by the space, so it
2402 // is not deallocated here.
2403 void TearDown();
2404
2405 // True if the space has been set up but not torn down.
2406 bool HasBeenSetUp() {
2407 return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
2408 }
2409
2410 // Flip the pair of spaces.
2411 void Flip();
2412
2413 // Grow the capacity of the semispaces. Assumes that they are not at
2414 // their maximum capacity.
2415 void Grow();
2416
2417 // Shrink the capacity of the semispaces.
2418 void Shrink();
2419
2420 // Return the allocated bytes in the active semispace.
2421 size_t Size() override {
2422 DCHECK_GE(top(), to_space_.page_low());
2423 return to_space_.pages_used() * Page::kAllocatableMemory +
2424 static_cast<size_t>(top() - to_space_.page_low());
2425 }
2426
2427 size_t SizeOfObjects() override { return Size(); }
2428
2429 // Return the allocatable capacity of a semispace.
2430 size_t Capacity() {
2431 SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2432 return (to_space_.current_capacity() / Page::kPageSize) *
2433 Page::kAllocatableMemory;
2434 }
2435
2436 // Return the current size of a semispace, allocatable and non-allocatable
2437 // memory.
2438 size_t TotalCapacity() {
2439 DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
2440 return to_space_.current_capacity();
2441 }
2442
2443 // Committed memory for NewSpace is the committed memory of both semi-spaces
2444 // combined.
2445 size_t CommittedMemory() override {
2446 return from_space_.CommittedMemory() + to_space_.CommittedMemory();
2447 }
2448
2449 size_t MaximumCommittedMemory() override {
2450 return from_space_.MaximumCommittedMemory() +
2451 to_space_.MaximumCommittedMemory();
2452 }
2453
2454 // Approximate amount of physical memory committed for this space.
2455 size_t CommittedPhysicalMemory() override;
2456
2457 // Return the available bytes without growing.
2458 size_t Available() override {
2459 DCHECK_GE(Capacity(), Size());
2460 return Capacity() - Size();
2461 }
2462
2463 size_t AllocatedSinceLastGC() {
2464 const Address age_mark = to_space_.age_mark();
2465 DCHECK_NOT_NULL(age_mark);
2466 DCHECK_NOT_NULL(top());
2467 Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
2468 Page* const last_page = Page::FromAllocationAreaAddress(top());
2469 Page* current_page = age_mark_page;
2470 size_t allocated = 0;
2471 if (current_page != last_page) {
2472 DCHECK_EQ(current_page, age_mark_page);
2473 DCHECK_GE(age_mark_page->area_end(), age_mark);
2474 allocated += age_mark_page->area_end() - age_mark;
2475 current_page = current_page->next_page();
2476 } else {
2477 DCHECK_GE(top(), age_mark);
2478 return top() - age_mark;
2479 }
2480 while (current_page != last_page) {
2481 DCHECK_NE(current_page, age_mark_page);
2482 allocated += Page::kAllocatableMemory;
2483 current_page = current_page->next_page();
2484 }
2485 DCHECK_GE(top(), current_page->area_start());
2486 allocated += top() - current_page->area_start();
2487 DCHECK_LE(allocated, Size());
2488 return allocated;
2489 }
2490
2491 void MovePageFromSpaceToSpace(Page* page) {
2492 DCHECK(page->InFromSpace());
2493 from_space_.RemovePage(page);
2494 to_space_.PrependPage(page);
2495 }
2496
2497 bool Rebalance();
2498
2499 // Return the maximum capacity of a semispace.
2500 size_t MaximumCapacity() {
2501 DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
2502 return to_space_.maximum_capacity();
2503 }
2504
2505 bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
2506
2507 // Returns the initial capacity of a semispace.
2508 size_t InitialTotalCapacity() {
2509 DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
2510 return to_space_.minimum_capacity();
2511 }
2512
2513 // Return the address of the allocation pointer in the active semispace.
2514 Address top() {
2515 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
2516 return allocation_info_.top();
2517 }
2518
2519 // Return the address of the allocation pointer limit in the active semispace.
2520 Address limit() {
2521 DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
2522 return allocation_info_.limit();
2523 }
2524
2525 // Return the address of the first object in the active semispace.
2526 Address bottom() { return to_space_.space_start(); }
2527
2528 // Get the age mark of the inactive semispace.
2529 Address age_mark() { return from_space_.age_mark(); }
2530 // Set the age mark in the active semispace.
2531 void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
2532
2533 // The allocation top and limit address.
2534 Address* allocation_top_address() { return allocation_info_.top_address(); }
2535 
2536 // The allocation limit address.
2537 Address* allocation_limit_address() {
2538 return allocation_info_.limit_address();
2539 }
2540
2541 MUST_USE_RESULT INLINE(AllocationResult AllocateRawAligned(
2542 int size_in_bytes, AllocationAlignment alignment));
2543
2544 MUST_USE_RESULT INLINE(
2545 AllocationResult AllocateRawUnaligned(int size_in_bytes));
2546
2547 MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(
2548 int size_in_bytes, AllocationAlignment alignment));
2549
2550 MUST_USE_RESULT inline AllocationResult AllocateRawSynchronized(
2551 int size_in_bytes, AllocationAlignment alignment);
2552
2553 // Reset the allocation pointer to the beginning of the active semispace.
2554 void ResetAllocationInfo();
2555
2556 // When inline allocation stepping is active, either because of incremental
2557 // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
2558 // inline allocation every once in a while. This is done by setting
2559 // allocation_info_.limit to be lower than the actual limit and increasing
2560 // it in steps to guarantee that the observers are notified periodically.
2561 void UpdateInlineAllocationLimit(int size_in_bytes);
2562
2563 void DisableInlineAllocationSteps() {
2564 top_on_previous_step_ = 0;
2565 UpdateInlineAllocationLimit(0);
2566 }
2567
2568 // Allows observation of inline allocation. The observer->Step() method gets
2569 // called after every step_size bytes have been allocated (approximately).
2570 // This works by adjusting the allocation limit to a lower value and adjusting
2571 // it after each step.
2572 void AddAllocationObserver(AllocationObserver* observer) override;
2573
2574 void RemoveAllocationObserver(AllocationObserver* observer) override;
2575
2576 // Get the extent of the inactive semispace (for use as a marking stack,
2577 // or to zap it). Notice: space-addresses are not necessarily on the
2578 // same page, so FromSpaceStart() might be above FromSpaceEnd().
2579 Address FromSpacePageLow() { return from_space_.page_low(); }
2580 Address FromSpacePageHigh() { return from_space_.page_high(); }
2581 Address FromSpaceStart() { return from_space_.space_start(); }
2582 Address FromSpaceEnd() { return from_space_.space_end(); }
2583 
2584 // Get the extent of the active semispace's pages' memory.
2585 Address ToSpaceStart() { return to_space_.space_start(); }
2586 Address ToSpaceEnd() { return to_space_.space_end(); }
2587
2588 inline bool ToSpaceContainsSlow(Address a);
2589 inline bool FromSpaceContainsSlow(Address a);
2590 inline bool ToSpaceContains(Object* o);
2591 inline bool FromSpaceContains(Object* o);
2592
2593 // Try to switch the active semispace to a new, empty, page.
2594 // Returns false if this isn't possible or reasonable (i.e., there
2595 // are no pages, or the current page is already empty), or true
2596 // if successful.
2597 bool AddFreshPage();
2598 bool AddFreshPageSynchronized();
2599
2600 #ifdef VERIFY_HEAP
2601 // Verify the active semispace.
2602 virtual void Verify();
2603 #endif
2604
2605 #ifdef DEBUG
2606 // Print the active semispace.
2607 void Print() override { to_space_.Print(); }
2608 #endif
2609
2610 // Iterates the active semispace to collect statistics.
2611 void CollectStatistics();
2612 // Reports previously collected statistics of the active semispace.
2613 void ReportStatistics();
2614 // Clears previously collected statistics.
2615 void ClearHistograms();
2616
2617 // Record the allocation or promotion of a heap object. Note that we don't
2618 // record every single allocation, but only those that happen in the
2619 // to space during a scavenge GC.
2620 void RecordAllocation(HeapObject* obj);
2621 void RecordPromotion(HeapObject* obj);
2622
2623 // Return whether the operation succeeded.
2624 bool CommitFromSpaceIfNeeded() {
2625 if (from_space_.is_committed()) return true;
2626 return from_space_.Commit();
2627 }
2628
2629 bool UncommitFromSpace() {
2630 if (!from_space_.is_committed()) return true;
2631 return from_space_.Uncommit();
2632 }
2633
2634 bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
2635 
2636 SemiSpace* active_space() { return &to_space_; }
2637
2638 void PauseAllocationObservers() override;
2639 void ResumeAllocationObservers() override;
2640
2641 iterator begin() { return to_space_.begin(); }
2642 iterator end() { return to_space_.end(); }
2643
2644 std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2645
2646 private:
2647 // Update allocation info to match the current to-space page.
2648 void UpdateAllocationInfo();
2649
2650 base::Mutex mutex_;
2651
2652 // The semispaces.
2653 SemiSpace to_space_;
2654 SemiSpace from_space_;
2655 base::VirtualMemory reservation_;
2656
2657 // Allocation pointer and limit for normal allocation and allocation during
2658 // mark-compact collection.
2659 AllocationInfo allocation_info_;
2660
2661 Address top_on_previous_step_;
2662
2663 HistogramInfo* allocated_histogram_;
2664 HistogramInfo* promoted_histogram_;
2665
2666 bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
2667
2668 // If we are doing inline allocation in steps, this method performs the 'step'
2669 // operation. top is the memory address of the bump pointer at the last
2670 // inline allocation (i.e. it determines the numbers of bytes actually
2671 // allocated since the last step.) new_top is the address of the bump pointer
2672 // where the next byte is going to be allocated from. top and new_top may be
2673 // different when we cross a page boundary or reset the space.
2674 void InlineAllocationStep(Address top, Address new_top, Address soon_object,
2675 size_t size);
2676 intptr_t GetNextInlineAllocationStepSize();
2677 void StartNextInlineAllocationStep();
2678
2679 friend class SemiSpaceIterator;
2680 };
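
// Illustrative sketch of the allocation-observer mechanism described above:
// |observer| is an AllocationObserver (declared elsewhere) whose Step()
// callback fires after roughly every step_size bytes of new-space
// allocation, because the space temporarily lowers allocation_info_.limit.
//
//   new_space->AddAllocationObserver(observer);
//   // ... mutator allocates; observer->Step() is invoked periodically ...
//   new_space->RemoveAllocationObserver(observer);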
2681
2682 class PauseAllocationObserversScope {
2683 public:
2684 explicit PauseAllocationObserversScope(Heap* heap);
2685 ~PauseAllocationObserversScope();
2686
2687 private:
2688 Heap* heap_;
2689 DISALLOW_COPY_AND_ASSIGN(PauseAllocationObserversScope);
2690 };
2691
2692 // -----------------------------------------------------------------------------
2693 // Compaction space that is used temporarily during compaction.
2694
2695 class CompactionSpace : public PagedSpace {
2696 public:
2697 CompactionSpace(Heap* heap, AllocationSpace id, Executability executable)
2698 : PagedSpace(heap, id, executable) {}
2699
2700 bool is_local() override { return true; }
2701
2702 protected:
2703 // The space is temporary and not included in any snapshots.
2704 bool snapshotable() override { return false; }
2705
2706 MUST_USE_RESULT HeapObject* SweepAndRetryAllocation(
2707 int size_in_bytes) override;
2708 };
2709
2710
2711 // A collection of |CompactionSpace|s used by a single compaction task.
2712 class CompactionSpaceCollection : public Malloced {
2713 public:
2714 explicit CompactionSpaceCollection(Heap* heap)
2715 : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE),
2716 code_space_(heap, CODE_SPACE, Executability::EXECUTABLE) {}
2717
2718 CompactionSpace* Get(AllocationSpace space) {
2719 switch (space) {
2720 case OLD_SPACE:
2721 return &old_space_;
2722 case CODE_SPACE:
2723 return &code_space_;
2724 default:
2725 UNREACHABLE();
2726 }
2727 UNREACHABLE();
2728 return nullptr;
2729 }
2730
2731 private:
2732 CompactionSpace old_space_;
2733 CompactionSpace code_space_;
2734 };
2735
2736
2737 // -----------------------------------------------------------------------------
2738 // Old object space (includes the old space of objects and code space)
2739
2740 class OldSpace : public PagedSpace {
2741 public:
2742 // Creates an old space object. The constructor does not allocate pages
2743 // from OS.
2744 OldSpace(Heap* heap, AllocationSpace id, Executability executable)
2745 : PagedSpace(heap, id, executable) {}
2746 };
2747
2748
2749 // For contiguous spaces, top should be in the space (or at the end) and limit
2750 // should be the end of the space.
2751 #define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
2752 SLOW_DCHECK((space).page_low() <= (info).top() && \
2753 (info).top() <= (space).page_high() && \
2754 (info).limit() <= (space).page_high())
2755
2756
2757 // -----------------------------------------------------------------------------
2758 // Old space for all map objects
2759
2760 class MapSpace : public PagedSpace {
2761 public:
2762 // Creates a map space object.
2763 MapSpace(Heap* heap, AllocationSpace id)
2764 : PagedSpace(heap, id, NOT_EXECUTABLE) {}
2765
2766 int RoundSizeDownToObjectAlignment(int size) override {
2767 if (base::bits::IsPowerOfTwo32(Map::kSize)) {
2768 return RoundDown(size, Map::kSize);
2769 } else {
2770 return (size / Map::kSize) * Map::kSize;
2771 }
2772 }
2773
2774 #ifdef VERIFY_HEAP
2775 void VerifyObject(HeapObject* obj) override;
2776 #endif
2777 };
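
// Illustrative arithmetic for RoundSizeDownToObjectAlignment() above: with a
// hypothetical Map::kSize of 88 bytes (not a power of two), a 200-byte
// request rounds down to (200 / 88) * 88 == 176 bytes, i.e. always a whole
// number of map-sized slots.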
2778
2779
2780 // -----------------------------------------------------------------------------
2781 // Large objects ( > kMaxRegularHeapObjectSize ) are allocated and
2782 // managed by the large object space. A large object is allocated from OS
2783 // heap with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
2784 // A large object always starts at Page::kObjectStartOffset into a page.
2785 // Large objects do not move during garbage collections.
2786
2787 class LargeObjectSpace : public Space {
2788 public:
2789 typedef LargePageIterator iterator;
2790
2791 LargeObjectSpace(Heap* heap, AllocationSpace id);
2792 virtual ~LargeObjectSpace();
2793
2794 // Initializes internal data structures.
2795 bool SetUp();
2796
2797 // Releases internal resources, frees objects in this space.
2798 void TearDown();
2799
2800 static size_t ObjectSizeFor(size_t chunk_size) {
2801 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
2802 return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
2803 }
2804
2805 // Shared implementation of AllocateRaw, AllocateRawCode and
2806 // AllocateRawFixedArray.
2807 MUST_USE_RESULT AllocationResult
2808 AllocateRaw(int object_size, Executability executable);
2809
2810 // Available bytes for objects in this space.
2811 inline size_t Available() override;
2812
2813 size_t Size() override { return size_; }
2814 
2815 size_t SizeOfObjects() override { return objects_size_; }
2816
2817 // Approximate amount of physical memory committed for this space.
2818 size_t CommittedPhysicalMemory() override;
2819
2820 int PageCount() { return page_count_; }
2821
2822 // Finds an object for a given address, returns a Smi if it is not found.
2823 // The function iterates through all objects in this space, may be slow.
2824 Object* FindObject(Address a);
2825
2826 // Takes the chunk_map_mutex_ and calls FindPage after that.
2827 LargePage* FindPageThreadSafe(Address a);
2828
2829 // Finds a large object page containing the given address, returns NULL
2830 // if such a page doesn't exist.
2831 LargePage* FindPage(Address a);
2832
2833 // Clears the marking state of live objects.
2834 void ClearMarkingStateOfLiveObjects();
2835
2836 // Frees unmarked objects.
2837 void FreeUnmarkedObjects();
2838
2839 void InsertChunkMapEntries(LargePage* page);
2840 void RemoveChunkMapEntries(LargePage* page);
2841 void RemoveChunkMapEntries(LargePage* page, Address free_start);
2842
2843 // Checks whether a heap object is in this space; O(1).
2844 bool Contains(HeapObject* obj);
2845 // Checks whether an address is in the object area in this space. Iterates
2846 // all objects in the space. May be slow.
2847 bool ContainsSlow(Address addr) { return FindObject(addr)->IsHeapObject(); }
2848
2849 // Checks whether the space is empty.
2850 bool IsEmpty() { return first_page_ == NULL; }
2851 
2852 void AdjustLiveBytes(int by) { objects_size_ += by; }
2853 
2854 LargePage* first_page() { return first_page_; }
2855
2856 // Collect code statistics.
2857 void CollectCodeStatistics();
2858
2859 iterator begin() { return iterator(first_page_); }
2860 iterator end() { return iterator(nullptr); }
2861
2862 std::unique_ptr<ObjectIterator> GetObjectIterator() override;
2863
2864 #ifdef VERIFY_HEAP
2865 virtual void Verify();
2866 #endif
2867
2868 #ifdef DEBUG
2869 void Print() override;
2870 void ReportStatistics();
2871 #endif
2872
2873 private:
2874 // The head of the linked list of large object chunks.
2875 LargePage* first_page_;
2876 size_t size_; // allocated bytes
2877 int page_count_; // number of chunks
2878 size_t objects_size_; // size of objects
2879 // The chunk_map_mutex_ has to be used when the chunk map is accessed
2880 // concurrently.
2881 base::Mutex chunk_map_mutex_;
2882 // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
2883 base::HashMap chunk_map_;
2884
2885 friend class LargeObjectIterator;
2886 };
2887
2888
2889 class LargeObjectIterator : public ObjectIterator {
2890 public:
2891 explicit LargeObjectIterator(LargeObjectSpace* space);
2892
2893 HeapObject* Next() override;
2894
2895 private:
2896 LargePage* current_;
2897 };
2898
2899 // Iterates over the chunks (pages and large object pages) that can contain
2900 // pointers to new space or to evacuation candidates.
2901 class MemoryChunkIterator BASE_EMBEDDED {
2902 public:
2903 inline explicit MemoryChunkIterator(Heap* heap);
2904
2905 // Return NULL when the iterator is done.
2906 inline MemoryChunk* next();
2907
2908 private:
2909 enum State {
2910 kOldSpaceState,
2911 kMapState,
2912 kCodeState,
2913 kLargeObjectState,
2914 kFinishedState
2915 };
2916 Heap* heap_;
2917 State state_;
2918 PageIterator old_iterator_;
2919 PageIterator code_iterator_;
2920 PageIterator map_iterator_;
2921 LargePageIterator lo_iterator_;
2922 };
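
// Illustrative sketch: visiting every chunk that may hold pointers to new
// space or to evacuation candidates.
//
//   MemoryChunkIterator it(heap);
//   for (MemoryChunk* chunk = it.next(); chunk != NULL; chunk = it.next()) {
//     // ... process chunk ...
//   }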
2923
2924 } // namespace internal
2925 } // namespace v8
2926
2927 #endif // V8_HEAP_SPACES_H_
2928