// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <atomic>
#include <memory>

#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
#include "src/heap/base/active-system-pages.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/linear-allocation-area.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/slot-set.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodePageAllocatorScope;
}  // namespace heap

class AllocationObserver;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class Page;
class PagedSpace;
class SemiSpace;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects, the rest of old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces consist of a list of pages. A page has a page header and an object
// area.
//
// There is a separate large object space for objects larger than
// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A remembered set is used to keep track of intergenerational references.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored, they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

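// As an illustrative sketch (not actual V8 code), the top/limit fields make
// allocation in the current linear area a bump-pointer operation:
//
//   if (top + size <= limit) {   // enough room left in the special section
//     Address result = top;
//     top += size;               // only the top field needs to be updated
//     return result;
//   }
//   // otherwise: fall back to the free list or request a GC.
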
// Some assertion macros used in the debugging mode.

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space)                          \
  DCHECK((0 < size) &&                                                    \
         (size <= std::min(MemoryChunkLayout::MaxRegularCodeObjectSize(), \
                           code_space->AreaSize())))

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
// sealed after startup (i.e. not ReadOnlySpace).
class V8_EXPORT_PRIVATE Space : public BaseSpace {
 public:
  Space(Heap* heap, AllocationSpace id, FreeList* free_list)
      : BaseSpace(heap, id),
        free_list_(std::unique_ptr<FreeList>(free_list)) {
    external_backing_store_bytes_ =
        new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
    external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
    external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
        0;
  }

  Space(const Space&) = delete;
  Space& operator=(const Space&) = delete;

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, Space* from, Space* to, size_t amount);

  ~Space() override {
    delete[] external_backing_store_bytes_;
    external_backing_store_bytes_ = nullptr;
  }

  virtual void AddAllocationObserver(AllocationObserver* observer);

  virtual void RemoveAllocationObserver(AllocationObserver* observer);

  virtual void PauseAllocationObservers();

  virtual void ResumeAllocationObservers();

  virtual void StartNextInlineAllocationStep() {}

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see OldLargeObjectSpace).
  virtual size_t SizeOfObjects() const { return Size(); }

  // Return the available bytes without growing.
  virtual size_t Available() const = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) const {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kTaggedSize);
    }
  }

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  // Returns amount of off-heap memory in-use by objects in this Space.
  virtual size_t ExternalBackingStoreBytes(
      ExternalBackingStoreType type) const {
    return external_backing_store_bytes_[type];
  }

  virtual MemoryChunk* first_page() { return memory_chunk_list_.front(); }
  virtual MemoryChunk* last_page() { return memory_chunk_list_.back(); }

  virtual const MemoryChunk* first_page() const {
    return memory_chunk_list_.front();
  }
  virtual const MemoryChunk* last_page() const {
    return memory_chunk_list_.back();
  }

  heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }

  virtual Page* InitializePage(MemoryChunk* chunk) {
    UNREACHABLE();
    return nullptr;
  }

  FreeList* free_list() { return free_list_.get(); }

  Address FirstPageAddress() const { return first_page()->address(); }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  AllocationCounter allocation_counter_;

  // The List manages the pages that belong to the given space.
  heap::List<MemoryChunk> memory_chunk_list_;

  // Tracks off-heap memory used by this space.
  std::atomic<size_t>* external_backing_store_bytes_;

  std::unique_ptr<FreeList> free_list_;
};

STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
 public:
  // Page flags copied from from-space to to-space when flipping semispaces.
  static constexpr MainThreadFlags kCopyOnFlipFlagsMask =
      MainThreadFlags(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);

  Page(Heap* heap, BaseSpace* space, size_t size, Address area_start,
       Address area_end, VirtualMemory reservation, Executability executable);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize[. This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
  }
  static Page* FromHeapObject(HeapObject o) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
  }

  static Page* cast(MemoryChunk* chunk) {
    DCHECK(!chunk->IsLargePage());
    return static_cast<Page*>(chunk);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
  // we subtract a whole word. The valid address ranges from
  // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
  static Page* FromAllocationAreaAddress(Address address) {
    DCHECK(!V8_ENABLE_THIRD_PARTY_HEAP_BOOL);
    return Page::FromAddress(address - kTaggedSize);
  }

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (addr & kPageAlignmentMask) == 0;
  }

  static Page* ConvertNewToOld(Page* old_page);

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }

  const Page* next_page() const {
    return static_cast<const Page*>(list_node_.next());
  }
  const Page* prev_page() const {
    return static_cast<const Page*>(list_node_.prev());
  }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory;
         i < owner()->free_list()->number_of_categories(); i++) {
      callback(categories_[i]);
    }
  }

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return categories_[type];
  }

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
  V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
  void DestroyBlackArea(Address start, Address end);
  void DestroyBlackAreaBackground(Address start, Address end);

  void InitializeFreeListCategories();
  void AllocateFreeListCategories();
  void ReleaseFreeListCategories();

  ActiveSystemPages* active_system_pages() { return &active_system_pages_; }

  template <RememberedSetType remembered_set>
  void ClearInvalidTypedSlots(const TypedSlotSet::FreeRangesMap& ranges) {
    TypedSlotSet* typed_slot_set = this->typed_slot_set<remembered_set>();
    if (typed_slot_set != nullptr) {
      typed_slot_set->ClearInvalidSlots(ranges);
    }
  }

  template <RememberedSetType remembered_set>
  void AssertNoInvalidTypedSlots(const TypedSlotSet::FreeRangesMap& ranges) {
    // TODO(dinfuehr): Make this a DCHECK eventually.
    TypedSlotSet* typed_slot_set = this->typed_slot_set<OLD_TO_OLD>();
    if (typed_slot_set != nullptr) {
      typed_slot_set->AssertNoInvalidSlots(ranges);
    }
  }

 private:
  friend class MemoryAllocator;
};
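
// Illustrative usage sketch (not actual V8 code; `obj` and `other_address` are
// placeholders): a Page is recovered from an address or heap object by masking
// off the low bits, so lookups are plain address arithmetic.
//
//   HeapObject obj = ...;
//   Page* page = Page::FromHeapObject(obj);
//   if (Page::OnSamePage(obj.address(), other_address)) {
//     // both addresses lie on the same page
//   }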

// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.

class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() = default;
  virtual HeapObject Next() = 0;
};

template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }
  inline PageIteratorImpl<PAGE_TYPE>& operator++();
  inline PageIteratorImpl<PAGE_TYPE> operator++(int);

 private:
  PAGE_TYPE* p_;
};

using PageIterator = PageIteratorImpl<Page>;
using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;
using ConstLargePageIterator = PageIteratorImpl<const LargePage>;

class PageRange {
 public:
  using iterator = PageIterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }

 private:
  Page* begin_;
  Page* end_;
};
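
// Illustrative usage sketch (not actual V8 code; `some_page` is a
// placeholder): PageRange exposes begin()/end() forward iterators, so it can
// be walked with a range-based for loop.
//
//   for (Page* p : PageRange(some_page)) {
//     // visits exactly {some_page}, since PageRange(page) spans
//     // [page, page->next_page())
//   }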

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed via
// Page::next_page() call.

// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
//   {
//     AllocationResult result = ...;
//     LocalAllocationBuffer a(heap, result, size);
//     LocalAllocationBuffer b = a;
//     CHECK(!a.IsValid());
//     CHECK(b.IsValid());
//     // {a} is invalid now and cannot be used for further allocations.
//   }
//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
//   // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can result
  // from either reassigning a buffer, or trying to construct it from an
  // invalid {AllocationResult}.
  static LocalAllocationBuffer InvalidBuffer() {
    return LocalAllocationBuffer(
        nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
  }

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { CloseAndMakeIterable(); }

  LocalAllocationBuffer(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer(LocalAllocationBuffer&& other)
      V8_NOEXCEPT;

  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer& operator=(
      LocalAllocationBuffer&& other) V8_NOEXCEPT;

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }

  // Try to merge LABs, which is only possible when they are adjacent in memory.
  // Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

  inline bool TryFreeLast(HeapObject object, int object_size);

  // Close a LAB, effectively invalidating it. Returns the unused area.
  V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
  void MakeIterable();

  Address top() const { return allocation_info_.top(); }
  Address limit() const { return allocation_info_.limit(); }

 private:
  V8_EXPORT_PRIVATE LocalAllocationBuffer(
      Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;

  Heap* heap_;
  LinearAllocationArea allocation_info_;
};
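
// Illustrative usage sketch (not actual V8 code; `heap`, `lab_size`,
// `object_size` and `alignment` are placeholders): a LAB is typically carved
// out of a shared space once and then allocated from without synchronization.
//
//   AllocationResult raw = ...;  // e.g. a bulk allocation of lab_size bytes
//   LocalAllocationBuffer lab =
//       LocalAllocationBuffer::FromResult(heap, raw, lab_size);
//   if (lab.IsValid()) {
//     AllocationResult object =
//         lab.AllocateRawAligned(object_size, alignment);
//   }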

class SpaceWithLinearArea : public Space {
 public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list,
                      LinearAllocationArea* allocation_info)
      : Space(heap, id, free_list), allocation_info_(allocation_info) {}

  virtual bool SupportsAllocationObserver() const = 0;

  // Returns the allocation pointer in this space.
  Address top() const { return allocation_info_->top(); }
  Address limit() const { return allocation_info_->limit(); }

  // The allocation top address.
  Address* allocation_top_address() const {
    return allocation_info_->top_address();
  }

  // The allocation limit address.
  Address* allocation_limit_address() const {
    return allocation_info_->limit_address();
  }

  // Methods needed for allocation observers.
  V8_EXPORT_PRIVATE void AddAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;

  V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
  V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
                                                   size_t size_in_bytes,
                                                   size_t aligned_size_in_bytes,
                                                   size_t allocation_size);

  void MarkLabStartInitialized();
  virtual void FreeLinearAllocationArea() = 0;

  // When allocation observers are active we may use a lower limit to allow the
  // observers to 'interrupt' earlier than the natural limit. Given a linear
  // area bounded by [start, end), this function computes the limit to use to
  // allow proper observation based on existing observers. min_size specifies
  // the minimum size that the limited area should have.
  Address ComputeLimit(Address start, Address end, size_t min_size) const;
  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
      size_t min_size) = 0;

  void DisableInlineAllocation();
  void EnableInlineAllocation();
  bool IsInlineAllocationEnabled() const { return use_lab_; }

  void PrintAllocationsOrigins() const;

  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
              AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult AllocateRawUnaligned(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
                     AllocationOrigin origin = AllocationOrigin::kRuntime);

 protected:
  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);

  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns a failure result if the linear allocation area does not fit the
  // object. Otherwise, returns the object pointer and writes the allocation
  // size (object size + alignment filler size) to aligned_size_in_bytes.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
                      AllocationAlignment alignment, AllocationOrigin origin);

  // Slow path of allocation function.
  V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
                  AllocationOrigin origin);

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  // Writes to `out_max_aligned_size` the actual number of bytes used for
  // checking that there is enough space.
  virtual bool EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment,
                                AllocationOrigin origin,
                                int* out_max_aligned_size) = 0;

#if DEBUG
  V8_EXPORT_PRIVATE void VerifyTop() const;
#endif  // DEBUG

  LinearAllocationArea* const allocation_info_;
  bool use_lab_ = true;

  size_t allocations_origins_[static_cast<int>(
      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};
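
// Illustrative control-flow sketch (not actual V8 code; exact AllocationResult
// accessors vary by V8 version and are shown abstractly): AllocateRaw first
// tries the inline fast path against the current linear allocation area and
// only falls back to the slow path when that fails.
//
//   AllocationResult result = AllocateFastUnaligned(size, origin);
//   if (/* result indicates failure */) {
//     // Slow path: EnsureAllocation() may extend the linear area or request
//     // a GC before retrying.
//     result = AllocateRawSlow(size, alignment, origin);
//   }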

// Iterates over all memory chunks in the heap (across all spaces).
class MemoryChunkIterator {
 public:
  explicit MemoryChunkIterator(Heap* heap) : space_iterator_(heap) {}

  V8_INLINE bool HasNext();
  V8_INLINE MemoryChunk* Next();

 private:
  SpaceIterator space_iterator_;
  MemoryChunk* current_chunk_ = nullptr;
};
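
// Illustrative usage sketch (not actual V8 code; `heap` is a placeholder):
// the iterator exposes a HasNext()/Next() pair, so a whole-heap walk is a
// simple loop.
//
//   MemoryChunkIterator it(heap);
//   while (it.HasNext()) {
//     MemoryChunk* chunk = it.Next();
//     // inspect or process the chunk
//   }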

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_