// Copyright 2011 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SPACES_H_
#define V8_HEAP_SPACES_H_

#include <atomic>
#include <memory>
#include <vector>

#include "src/base/iterator.h"
#include "src/base/macros.h"
#include "src/common/globals.h"
#include "src/heap/allocation-observer.h"
#include "src/heap/base-space.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/free-list.h"
#include "src/heap/heap.h"
#include "src/heap/list.h"
#include "src/heap/memory-chunk.h"
#include "src/objects/objects.h"
#include "src/utils/allocation.h"
#include "src/utils/utils.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

namespace heap {
class HeapTester;
class TestCodePageAllocatorScope;
}  // namespace heap

class AllocationObserver;
class FreeList;
class Isolate;
class LargeObjectSpace;
class LargePage;
class LinearAllocationArea;
class Page;
class PagedSpace;
class SemiSpace;

// -----------------------------------------------------------------------------
// Heap structures:
//
// A JS heap consists of a young generation, an old generation, and a large
// object space. The young generation is divided into two semispaces. A
// scavenger implements Cheney's copying algorithm. The old generation is
// separated into a map space and an old object space. The map space contains
// all (and only) map objects; all other old objects go into the old space.
// The old generation is collected by a mark-sweep-compact collector.
//
// The semispaces of the young generation are contiguous.  The old and map
// spaces each consist of a list of pages. A page has a page header and an
// object area.
//
// There is a separate large object space for objects larger than
// kMaxRegularHeapObjectSize, so that they do not have to move during
// collection. The large object space is paged. Pages in large object space
// may be larger than the page size.
//
// A store-buffer based write barrier is used to keep track of intergenerational
// references.  See heap/store-buffer.h.
//
// During scavenges and mark-sweep collections we sometimes (after a store
// buffer overflow) iterate intergenerational pointers without decoding heap
// object maps, so if the page belongs to old space or large object space
// it is essential to guarantee that the page does not contain any
// garbage pointers to new space: every pointer-aligned word which satisfies
// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
// new space. Thus objects in old space and large object spaces should have a
// special layout (e.g. no bare integer fields). This requirement does not
// apply to map space, which is iterated in a special fashion. However we still
// require pointer fields of dead maps to be cleaned.
//
// To enable lazy cleaning of old space pages we can mark chunks of the page
// as being garbage.  Garbage sections are marked with a special map.  These
// sections are skipped when scanning the page, even if we are otherwise
// scanning without regard for object boundaries.  Garbage sections are chained
// together to form a free list after a GC.  Garbage sections created outside
// of GCs by object truncation etc. may not be in the free list chain.  Very
// small free spaces are ignored; they need only be cleaned of bogus pointers
// into new space.
//
// Each page may have up to one special garbage section.  The start of this
// section is denoted by the top field in the space.  The end of the section
// is denoted by the limit field in the space.  This special garbage section
// is not marked with a free space map in the data.  The point of this section
// is to enable linear allocation without having to constantly update the byte
// array every time the top field is updated and a new object is created.  The
// special garbage section is not in the chain of garbage sections.
//
// Since the top and limit fields are in the space, not the page, only one page
// has a special garbage section, and if the top and limit are equal then there
// is no special garbage section.

// Some assertion macros used in debug mode.

#define DCHECK_OBJECT_SIZE(size) \
  DCHECK((0 < size) && (size <= kMaxRegularHeapObjectSize))

#define DCHECK_CODEOBJECT_SIZE(size, code_space)                          \
  DCHECK((0 < size) &&                                                    \
         (size <= std::min(MemoryChunkLayout::MaxRegularCodeObjectSize(), \
                           code_space->AreaSize())))

// ----------------------------------------------------------------------------
// Space is the abstract superclass for all allocation spaces that are not
// sealed after startup (i.e. not ReadOnlySpace).
class V8_EXPORT_PRIVATE Space : public BaseSpace {
 public:
  Space(Heap* heap, AllocationSpace id, FreeList* free_list)
      : BaseSpace(heap, id),
        free_list_(std::unique_ptr<FreeList>(free_list)) {
    external_backing_store_bytes_ =
        new std::atomic<size_t>[ExternalBackingStoreType::kNumTypes];
    external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] = 0;
    external_backing_store_bytes_[ExternalBackingStoreType::kExternalString] =
        0;
  }

  static inline void MoveExternalBackingStoreBytes(
      ExternalBackingStoreType type, Space* from, Space* to, size_t amount);

  ~Space() override {
    delete[] external_backing_store_bytes_;
    external_backing_store_bytes_ = nullptr;
  }

  virtual void AddAllocationObserver(AllocationObserver* observer);

  virtual void RemoveAllocationObserver(AllocationObserver* observer);

  virtual void PauseAllocationObservers();

  virtual void ResumeAllocationObservers();

  virtual void StartNextInlineAllocationStep() {}

  // Returns size of objects. Can differ from the allocated size
  // (e.g. see OldLargeObjectSpace).
  virtual size_t SizeOfObjects() { return Size(); }

  // Return the available bytes without growing.
  virtual size_t Available() = 0;

  virtual int RoundSizeDownToObjectAlignment(int size) {
    if (id_ == CODE_SPACE) {
      return RoundDown(size, kCodeAlignment);
    } else {
      return RoundDown(size, kTaggedSize);
    }
  }

  virtual std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) = 0;

  inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  inline void DecrementExternalBackingStoreBytes(ExternalBackingStoreType type,
                                                 size_t amount);

  // Returns amount of off-heap memory in-use by objects in this Space.
  virtual size_t ExternalBackingStoreBytes(
      ExternalBackingStoreType type) const {
    return external_backing_store_bytes_[type];
  }
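
  // Illustrative usage sketch (not from this file): code that attaches an
  // off-heap backing store to an object in this space is expected to keep
  // this accounting in sync; `backing_store_size` is a hypothetical variable.
  //
  //   space->IncrementExternalBackingStoreBytes(
  //       ExternalBackingStoreType::kArrayBuffer, backing_store_size);
  //   ...
  //   space->DecrementExternalBackingStoreBytes(
  //       ExternalBackingStoreType::kArrayBuffer, backing_store_size);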

  MemoryChunk* first_page() { return memory_chunk_list_.front(); }
  MemoryChunk* last_page() { return memory_chunk_list_.back(); }

  const MemoryChunk* first_page() const { return memory_chunk_list_.front(); }
  const MemoryChunk* last_page() const { return memory_chunk_list_.back(); }

  heap::List<MemoryChunk>& memory_chunk_list() { return memory_chunk_list_; }

  FreeList* free_list() { return free_list_.get(); }

  Address FirstPageAddress() const { return first_page()->address(); }

#ifdef DEBUG
  virtual void Print() = 0;
#endif

 protected:
  AllocationCounter allocation_counter_;

  // The List manages the pages that belong to the given space.
  heap::List<MemoryChunk> memory_chunk_list_;

  // Tracks off-heap memory used by this space.
  std::atomic<size_t>* external_backing_store_bytes_;

  std::unique_ptr<FreeList> free_list_;

  DISALLOW_COPY_AND_ASSIGN(Space);
};

STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);

// -----------------------------------------------------------------------------
// A page is a memory chunk of size 256K. Large object pages may be larger.
//
// The only way to get a page pointer is by calling factory methods:
//   Page* p = Page::FromAddress(addr); or
//   Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
 public:
  static const intptr_t kCopyAllFlags = ~0;

  // Page flags copied from from-space to to-space when flipping semispaces.
  static const intptr_t kCopyOnFlipFlagsMask =
      static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
      static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);

  // Returns the page containing a given address. The address ranges
  // from [page_addr .. page_addr + kPageSize). This only works if the object
  // is in fact in a page.
  static Page* FromAddress(Address addr) {
    return reinterpret_cast<Page*>(addr & ~kPageAlignmentMask);
  }
  static Page* FromHeapObject(HeapObject o) {
    return reinterpret_cast<Page*>(o.ptr() & ~kAlignmentMask);
  }

  // Returns the page containing the address provided. The address can
  // potentially point right after the page. To also be safe for tagged values
  // we subtract one tagged word. The valid address range is
  // [page_addr + area_start_ .. page_addr + kPageSize + kTaggedSize].
  static Page* FromAllocationAreaAddress(Address address) {
    return Page::FromAddress(address - kTaggedSize);
  }

  // Checks if address1 and address2 are on the same new space page.
  static bool OnSamePage(Address address1, Address address2) {
    return Page::FromAddress(address1) == Page::FromAddress(address2);
  }

  // Checks whether an address is page aligned.
  static bool IsAlignedToPageSize(Address addr) {
    return (addr & kPageAlignmentMask) == 0;
  }

  static Page* ConvertNewToOld(Page* old_page);

  inline void MarkNeverAllocateForTesting();
  inline void MarkEvacuationCandidate();
  inline void ClearEvacuationCandidate();

  Page* next_page() { return static_cast<Page*>(list_node_.next()); }
  Page* prev_page() { return static_cast<Page*>(list_node_.prev()); }

  const Page* next_page() const {
    return static_cast<const Page*>(list_node_.next());
  }
  const Page* prev_page() const {
    return static_cast<const Page*>(list_node_.prev());
  }

  template <typename Callback>
  inline void ForAllFreeListCategories(Callback callback) {
    for (int i = kFirstCategory;
         i < owner()->free_list()->number_of_categories(); i++) {
      callback(categories_[i]);
    }
  }
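
  // Illustrative sketch of walking this page's free-list categories, e.g. to
  // sum up the free bytes on the page (roughly what AvailableInFreeList()
  // below reports). FreeListCategory::available() is assumed here and is not
  // declared in this header.
  //
  //   size_t free_bytes = 0;
  //   page->ForAllFreeListCategories([&free_bytes](FreeListCategory* category) {
  //     free_bytes += category->available();
  //   });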

  size_t AvailableInFreeList();

  size_t AvailableInFreeListFromAllocatedBytes() {
    DCHECK_GE(area_size(), wasted_memory() + allocated_bytes());
    return area_size() - wasted_memory() - allocated_bytes();
  }

  FreeListCategory* free_list_category(FreeListCategoryType type) {
    return categories_[type];
  }

  size_t ShrinkToHighWaterMark();

  V8_EXPORT_PRIVATE void CreateBlackArea(Address start, Address end);
  V8_EXPORT_PRIVATE void CreateBlackAreaBackground(Address start, Address end);
  void DestroyBlackArea(Address start, Address end);
  void DestroyBlackAreaBackground(Address start, Address end);

  void InitializeFreeListCategories();
  void AllocateFreeListCategories();
  void ReleaseFreeListCategories();

  void MoveOldToNewRememberedSetForSweeping();
  void MergeOldToNewRememberedSets();

 private:
  friend class MemoryAllocator;
};

// Validate our estimates on the header size.
STATIC_ASSERT(sizeof(BasicMemoryChunk) <= BasicMemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);

// -----------------------------------------------------------------------------
// Interface for heap object iterator to be implemented by all object space
// object iterators.

class V8_EXPORT_PRIVATE ObjectIterator : public Malloced {
 public:
  virtual ~ObjectIterator() = default;
  virtual HeapObject Next() = 0;
};

template <class PAGE_TYPE>
class PageIteratorImpl
    : public base::iterator<std::forward_iterator_tag, PAGE_TYPE> {
 public:
  explicit PageIteratorImpl(PAGE_TYPE* p) : p_(p) {}
  PageIteratorImpl(const PageIteratorImpl<PAGE_TYPE>& other) : p_(other.p_) {}
  PAGE_TYPE* operator*() { return p_; }
  bool operator==(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ == p_;
  }
  bool operator!=(const PageIteratorImpl<PAGE_TYPE>& rhs) {
    return rhs.p_ != p_;
  }
  inline PageIteratorImpl<PAGE_TYPE>& operator++();
  inline PageIteratorImpl<PAGE_TYPE> operator++(int);

 private:
  PAGE_TYPE* p_;
};

using PageIterator = PageIteratorImpl<Page>;
using ConstPageIterator = PageIteratorImpl<const Page>;
using LargePageIterator = PageIteratorImpl<LargePage>;

class PageRange {
 public:
  using iterator = PageIterator;
  PageRange(Page* begin, Page* end) : begin_(begin), end_(end) {}
  explicit PageRange(Page* page) : PageRange(page, page->next_page()) {}
  inline PageRange(Address start, Address limit);

  iterator begin() { return iterator(begin_); }
  iterator end() { return iterator(end_); }

 private:
  Page* begin_;
  Page* end_;
};
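
// Illustrative usage sketch: visit the pages covering an address range. The
// bounds `range_start` and `range_limit` are hypothetical and must lie on
// pages of the same space.
//
//   for (Page* page : PageRange(range_start, range_limit)) {
//     // ... process `page` ...
//   }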

// -----------------------------------------------------------------------------
// A space has a circular list of pages. The next page can be accessed by
// calling Page::next_page().

// An abstraction of allocation and relocation pointers in a page-structured
// space.
class LinearAllocationArea {
 public:
  LinearAllocationArea()
      : start_(kNullAddress), top_(kNullAddress), limit_(kNullAddress) {}
  LinearAllocationArea(Address top, Address limit)
      : start_(top), top_(top), limit_(limit) {}

  void Reset(Address top, Address limit) {
    start_ = top;
    set_top(top);
    set_limit(limit);
  }

  void MoveStartToTop() { start_ = top_; }

  V8_INLINE Address start() const { return start_; }

  V8_INLINE void set_top(Address top) {
    SLOW_DCHECK(top == kNullAddress || (top & kHeapObjectTagMask) == 0);
    top_ = top;
  }

  V8_INLINE Address top() const {
    SLOW_DCHECK(top_ == kNullAddress || (top_ & kHeapObjectTagMask) == 0);
    return top_;
  }

  Address* top_address() { return &top_; }

  V8_INLINE void set_limit(Address limit) { limit_ = limit; }

  V8_INLINE Address limit() const { return limit_; }

  Address* limit_address() { return &limit_; }

#ifdef DEBUG
  bool VerifyPagedAllocation() {
    return (Page::FromAllocationAreaAddress(top_) ==
            Page::FromAllocationAreaAddress(limit_)) &&
           (top_ <= limit_);
  }
#endif

 private:
  // Current allocation start.
  Address start_;
  // Current allocation top.
  Address top_;
  // Current allocation limit.
  Address limit_;
};
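
// Illustrative bump-pointer sketch (assumed caller code, not part of this
// header): the fast path of linear allocation advances `top` towards `limit`
// and falls back to a slow path once the area is exhausted; `area_start`,
// `area_limit` and `size_in_bytes` are hypothetical.
//
//   LinearAllocationArea lab(area_start, area_limit);
//   Address object_address = lab.top();
//   if (object_address + size_in_bytes <= lab.limit()) {
//     lab.set_top(object_address + size_in_bytes);  // fast-path allocation
//   } else {
//     // refill the linear allocation area via the slow path
//   }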


// LocalAllocationBuffer represents a linear allocation area that is created
// from a given {AllocationResult} and can be used to allocate memory without
// synchronization.
//
// The buffer is properly closed upon destruction and reassignment.
// Example:
//   {
//     AllocationResult result = ...;
//     LocalAllocationBuffer a(heap, result, size);
//     LocalAllocationBuffer b = std::move(a);
//     CHECK(!a.IsValid());
//     CHECK(b.IsValid());
//     // {a} is invalid now and cannot be used for further allocations.
//   }
//   // Since {b} went out of scope, the LAB is closed, resulting in creating a
//   // filler object for the remaining area.
class LocalAllocationBuffer {
 public:
  // Indicates that a buffer cannot be used for allocations anymore. Can result
  // from either reassigning a buffer, or trying to construct it from an
  // invalid {AllocationResult}.
  static LocalAllocationBuffer InvalidBuffer() {
    return LocalAllocationBuffer(
        nullptr, LinearAllocationArea(kNullAddress, kNullAddress));
  }

  // Creates a new LAB from a given {AllocationResult}. Results in
  // InvalidBuffer if the result indicates a retry.
  static inline LocalAllocationBuffer FromResult(Heap* heap,
                                                 AllocationResult result,
                                                 intptr_t size);

  ~LocalAllocationBuffer() { CloseAndMakeIterable(); }

  LocalAllocationBuffer(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer(LocalAllocationBuffer&& other)
      V8_NOEXCEPT;

  LocalAllocationBuffer& operator=(const LocalAllocationBuffer& other) = delete;
  V8_EXPORT_PRIVATE LocalAllocationBuffer& operator=(
      LocalAllocationBuffer&& other) V8_NOEXCEPT;

  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment);

  inline bool IsValid() { return allocation_info_.top() != kNullAddress; }

  // Try to merge LABs, which is only possible when they are adjacent in memory.
  // Returns true if the merge was successful, false otherwise.
  inline bool TryMerge(LocalAllocationBuffer* other);

  inline bool TryFreeLast(HeapObject object, int object_size);

  // Close a LAB, effectively invalidating it. Returns the unused area.
  V8_EXPORT_PRIVATE LinearAllocationArea CloseAndMakeIterable();
  void MakeIterable();

  Address top() const { return allocation_info_.top(); }
  Address limit() const { return allocation_info_.limit(); }

 private:
  V8_EXPORT_PRIVATE LocalAllocationBuffer(
      Heap* heap, LinearAllocationArea allocation_info) V8_NOEXCEPT;

  Heap* heap_;
  LinearAllocationArea allocation_info_;
};

class SpaceWithLinearArea : public Space {
 public:
  SpaceWithLinearArea(Heap* heap, AllocationSpace id, FreeList* free_list)
      : Space(heap, id, free_list) {
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  virtual bool SupportsAllocationObserver() = 0;

  // Returns the allocation pointer in this space.
  Address top() { return allocation_info_.top(); }
  Address limit() { return allocation_info_.limit(); }

  // The allocation top address.
  Address* allocation_top_address() { return allocation_info_.top_address(); }

  // The allocation limit address.
  Address* allocation_limit_address() {
    return allocation_info_.limit_address();
  }

  // Methods needed for allocation observers.
  V8_EXPORT_PRIVATE void AddAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void RemoveAllocationObserver(
      AllocationObserver* observer) override;
  V8_EXPORT_PRIVATE void ResumeAllocationObservers() override;
  V8_EXPORT_PRIVATE void PauseAllocationObservers() override;

  V8_EXPORT_PRIVATE void AdvanceAllocationObservers();
  V8_EXPORT_PRIVATE void InvokeAllocationObservers(Address soon_object,
                                                   size_t size_in_bytes,
                                                   size_t aligned_size_in_bytes,
                                                   size_t allocation_size);

  void MarkLabStartInitialized();

  // When allocation observers are active we may use a lower limit to allow the
  // observers to 'interrupt' earlier than the natural limit. Given a linear
  // area bounded by [start, end), this function computes the limit to use to
  // allow proper observation based on existing observers. min_size specifies
  // the minimum size that the limited area should have.
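  //
  // Illustrative sketch (an assumption about typical behavior, not the exact
  // implementation): with a single active observer whose next step is `step`
  // bytes away, the computed limit is roughly
  //   std::min(end, start + std::max(min_size, step))
  // so that allocation leaves the fast path early enough for the observer to
  // be notified.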
  Address ComputeLimit(Address start, Address end, size_t min_size);
  V8_EXPORT_PRIVATE virtual void UpdateInlineAllocationLimit(
      size_t min_size) = 0;

  V8_EXPORT_PRIVATE void UpdateAllocationOrigins(AllocationOrigin origin);

  void PrintAllocationsOrigins();

 protected:
  // TODO(ofrobots): make these private after refactoring is complete.
  LinearAllocationArea allocation_info_;

  size_t allocations_origins_[static_cast<int>(
      AllocationOrigin::kNumberOfAllocationOrigins)] = {0};
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SPACES_H_