// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_NEW_SPACES_H_
#define V8_HEAP_NEW_SPACES_H_

#include <atomic>
#include <memory>

#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/heap/heap.h"
#include "src/heap/spaces.h"
#include "src/logging/log.h"
#include "src/objects/heap-object.h"

namespace v8 {
namespace internal {
21 class Heap;
22 class MemoryChunk;
23 
24 enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
25 
26 // -----------------------------------------------------------------------------
27 // SemiSpace in young generation
28 //
29 // A SemiSpace is a contiguous chunk of memory holding page-like memory chunks.
30 // The mark-compact collector  uses the memory of the first page in the from
31 // space as a marking stack when tracing live objects.
32 class SemiSpace : public Space {
33  public:
34   using iterator = PageIterator;
35   using const_iterator = ConstPageIterator;
36 
37   static void Swap(SemiSpace* from, SemiSpace* to);
38 
SemiSpace(Heap * heap,SemiSpaceId semispace)39   SemiSpace(Heap* heap, SemiSpaceId semispace)
40       : Space(heap, NEW_SPACE, new NoFreeList()),
41         current_capacity_(0),
42         maximum_capacity_(0),
43         minimum_capacity_(0),
44         age_mark_(kNullAddress),
45         committed_(false),
46         id_(semispace),
47         current_page_(nullptr),
48         pages_used_(0) {}
49 
50   inline bool Contains(HeapObject o) const;
51   inline bool Contains(Object o) const;
52   inline bool ContainsSlow(Address a) const;
53 
54   void SetUp(size_t initial_capacity, size_t maximum_capacity);
55   void TearDown();
56 
57   bool Commit();
58   bool Uncommit();
is_committed()59   bool is_committed() { return committed_; }
60 
61   // Grow the semispace to the new capacity.  The new capacity requested must
62   // be larger than the current capacity and less than the maximum capacity.
63   bool GrowTo(size_t new_capacity);
64 
65   // Shrinks the semispace to the new capacity.  The new capacity requested
66   // must be more than the amount of used memory in the semispace and less
67   // than the current capacity.
68   bool ShrinkTo(size_t new_capacity);
69 
70   bool EnsureCurrentCapacity();
71 
space_end()72   Address space_end() { return memory_chunk_list_.back()->area_end(); }
73 
74   // Returns the start address of the first page of the space.
space_start()75   Address space_start() {
76     DCHECK_NE(memory_chunk_list_.front(), nullptr);
77     return memory_chunk_list_.front()->area_start();
78   }
79 
current_page()80   Page* current_page() { return current_page_; }
pages_used()81   int pages_used() { return pages_used_; }
82 
83   // Returns the start address of the current page of the space.
page_low()84   Address page_low() { return current_page_->area_start(); }
85 
86   // Returns one past the end address of the current page of the space.
page_high()87   Address page_high() { return current_page_->area_end(); }
88 
AdvancePage()89   bool AdvancePage() {
90     Page* next_page = current_page_->next_page();
91     // We cannot expand if we reached the maximum number of pages already. Note
92     // that we need to account for the next page already for this check as we
93     // could potentially fill the whole page after advancing.
94     const bool reached_max_pages = (pages_used_ + 1) == max_pages();
95     if (next_page == nullptr || reached_max_pages) {
96       return false;
97     }
98     current_page_ = next_page;
99     pages_used_++;
100     return true;
101   }
102 
103   // Resets the space to using the first page.
104   void Reset();
105 
106   void RemovePage(Page* page);
107   void PrependPage(Page* page);
108 
109   Page* InitializePage(MemoryChunk* chunk);
110 
111   // Age mark accessors.
age_mark()112   Address age_mark() { return age_mark_; }
113   void set_age_mark(Address mark);
114 
115   // Returns the current capacity of the semispace.
current_capacity()116   size_t current_capacity() { return current_capacity_; }
117 
118   // Returns the maximum capacity of the semispace.
maximum_capacity()119   size_t maximum_capacity() { return maximum_capacity_; }
120 
121   // Returns the initial capacity of the semispace.
minimum_capacity()122   size_t minimum_capacity() { return minimum_capacity_; }
123 
id()124   SemiSpaceId id() { return id_; }
125 
126   // Approximate amount of physical memory committed for this space.
127   size_t CommittedPhysicalMemory() override;
128 
129   // If we don't have these here then SemiSpace will be abstract.  However
130   // they should never be called:
131 
Size()132   size_t Size() override { UNREACHABLE(); }
133 
SizeOfObjects()134   size_t SizeOfObjects() override { return Size(); }
135 
Available()136   size_t Available() override { UNREACHABLE(); }
137 
first_page()138   Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
last_page()139   Page* last_page() { return reinterpret_cast<Page*>(Space::last_page()); }
140 
first_page()141   const Page* first_page() const {
142     return reinterpret_cast<const Page*>(Space::first_page());
143   }
last_page()144   const Page* last_page() const {
145     return reinterpret_cast<const Page*>(Space::last_page());
146   }
147 
begin()148   iterator begin() { return iterator(first_page()); }
end()149   iterator end() { return iterator(nullptr); }
150 
begin()151   const_iterator begin() const { return const_iterator(first_page()); }
end()152   const_iterator end() const { return const_iterator(nullptr); }
153 
154   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
155 
156 #ifdef DEBUG
157   V8_EXPORT_PRIVATE void Print() override;
158   // Validate a range of of addresses in a SemiSpace.
159   // The "from" address must be on a page prior to the "to" address,
160   // in the linked page order, or it must be earlier on the same page.
161   static void AssertValidRange(Address from, Address to);
162 #else
163   // Do nothing.
AssertValidRange(Address from,Address to)164   inline static void AssertValidRange(Address from, Address to) {}
165 #endif
166 
167 #ifdef VERIFY_HEAP
168   virtual void Verify();
169 #endif
170 
171  private:
172   void RewindPages(int num_pages);
173 
max_pages()174   inline int max_pages() {
175     return static_cast<int>(current_capacity_ / Page::kPageSize);
176   }
177 
178   // Copies the flags into the masked positions on all pages in the space.
179   void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
180 
181   // The currently committed space capacity.
182   size_t current_capacity_;
183 
184   // The maximum capacity that can be used by this space. A space cannot grow
185   // beyond that size.
186   size_t maximum_capacity_;
187 
188   // The minimum capacity for the space. A space cannot shrink below this size.
189   size_t minimum_capacity_;
190 
191   // Used to govern object promotion during mark-compact collection.
192   Address age_mark_;
193 
194   bool committed_;
195   SemiSpaceId id_;
196 
197   Page* current_page_;
198 
199   int pages_used_;
200 
201   friend class NewSpace;
202   friend class SemiSpaceObjectIterator;
203 };
204 
205 // A SemiSpaceObjectIterator is an ObjectIterator that iterates over the active
206 // semispace of the heap's new space.  It iterates over the objects in the
207 // semispace from a given start address (defaulting to the bottom of the
208 // semispace) to the top of the semispace.  New objects allocated after the
209 // iterator is created are not iterated.
210 class SemiSpaceObjectIterator : public ObjectIterator {
211  public:
212   // Create an iterator over the allocated objects in the given to-space.
213   explicit SemiSpaceObjectIterator(NewSpace* space);
214 
215   inline HeapObject Next() override;
216 
217  private:
218   void Initialize(Address start, Address end);
219 
220   // The current iteration point.
221   Address current_;
222   // The end of iteration.
223   Address limit_;
224 };
225 
226 // -----------------------------------------------------------------------------
227 // The young generation space.
228 //
229 // The new space consists of a contiguous pair of semispaces.  It simply
230 // forwards most functions to the appropriate semispace.
231 
232 class V8_EXPORT_PRIVATE NewSpace
NON_EXPORTED_BASE(public SpaceWithLinearArea)233     : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
234  public:
235   using iterator = PageIterator;
236   using const_iterator = ConstPageIterator;
237 
238   NewSpace(Heap* heap, v8::PageAllocator* page_allocator,
239            size_t initial_semispace_capacity, size_t max_semispace_capacity);
240 
241   ~NewSpace() override { TearDown(); }
242 
243   inline bool ContainsSlow(Address a) const;
244   inline bool Contains(Object o) const;
245   inline bool Contains(HeapObject o) const;
246 
247   // Tears down the space.  Heap memory was not allocated by the space, so it
248   // is not deallocated here.
249   void TearDown();
250 
251   // Flip the pair of spaces.
252   void Flip();
253 
254   // Grow the capacity of the semispaces.  Assumes that they are not at
255   // their maximum capacity.
256   void Grow();
257 
258   // Shrink the capacity of the semispaces.
259   void Shrink();
260 
261   // Return the allocated bytes in the active semispace.
262   size_t Size() final {
263     DCHECK_GE(top(), to_space_.page_low());
264     return to_space_.pages_used() *
265                MemoryChunkLayout::AllocatableMemoryInDataPage() +
266            static_cast<size_t>(top() - to_space_.page_low());
267   }
268 
269   size_t SizeOfObjects() final { return Size(); }
270 
271   // Return the allocatable capacity of a semispace.
272   size_t Capacity() {
273     SLOW_DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
274     return (to_space_.current_capacity() / Page::kPageSize) *
275            MemoryChunkLayout::AllocatableMemoryInDataPage();
276   }
277 
278   // Return the current size of a semispace, allocatable and non-allocatable
279   // memory.
280   size_t TotalCapacity() {
281     DCHECK(to_space_.current_capacity() == from_space_.current_capacity());
282     return to_space_.current_capacity();
283   }
284 
285   // Committed memory for NewSpace is the committed memory of both semi-spaces
286   // combined.
287   size_t CommittedMemory() final {
288     return from_space_.CommittedMemory() + to_space_.CommittedMemory();
289   }
290 
291   size_t MaximumCommittedMemory() final {
292     return from_space_.MaximumCommittedMemory() +
293            to_space_.MaximumCommittedMemory();
294   }
295 
296   // Approximate amount of physical memory committed for this space.
297   size_t CommittedPhysicalMemory() final;
298 
299   // Return the available bytes without growing.
300   size_t Available() final {
301     DCHECK_GE(Capacity(), Size());
302     return Capacity() - Size();
303   }
304 
305   size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
306     if (type == ExternalBackingStoreType::kArrayBuffer)
307       return heap()->YoungArrayBufferBytes();
308     DCHECK_EQ(0, from_space_.ExternalBackingStoreBytes(type));
309     return to_space_.ExternalBackingStoreBytes(type);
310   }
311 
312   size_t ExternalBackingStoreBytes() {
313     size_t result = 0;
314     for (int i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
315       result +=
316           ExternalBackingStoreBytes(static_cast<ExternalBackingStoreType>(i));
317     }
318     return result;
319   }
320 
321   size_t AllocatedSinceLastGC() {
322     const Address age_mark = to_space_.age_mark();
323     DCHECK_NE(age_mark, kNullAddress);
324     DCHECK_NE(top(), kNullAddress);
325     Page* const age_mark_page = Page::FromAllocationAreaAddress(age_mark);
326     Page* const last_page = Page::FromAllocationAreaAddress(top());
327     Page* current_page = age_mark_page;
328     size_t allocated = 0;
329     if (current_page != last_page) {
330       DCHECK_EQ(current_page, age_mark_page);
331       DCHECK_GE(age_mark_page->area_end(), age_mark);
332       allocated += age_mark_page->area_end() - age_mark;
333       current_page = current_page->next_page();
334     } else {
335       DCHECK_GE(top(), age_mark);
336       return top() - age_mark;
337     }
338     while (current_page != last_page) {
339       DCHECK_NE(current_page, age_mark_page);
340       allocated += MemoryChunkLayout::AllocatableMemoryInDataPage();
341       current_page = current_page->next_page();
342     }
343     DCHECK_GE(top(), current_page->area_start());
344     allocated += top() - current_page->area_start();
345     DCHECK_LE(allocated, Size());
346     return allocated;
347   }
348 
349   void MovePageFromSpaceToSpace(Page* page) {
350     DCHECK(page->IsFromPage());
351     from_space_.RemovePage(page);
352     to_space_.PrependPage(page);
353   }
354 
355   bool Rebalance();
356 
357   // Return the maximum capacity of a semispace.
358   size_t MaximumCapacity() {
359     DCHECK(to_space_.maximum_capacity() == from_space_.maximum_capacity());
360     return to_space_.maximum_capacity();
361   }
362 
363   bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
364 
365   // Returns the initial capacity of a semispace.
366   size_t InitialTotalCapacity() {
367     DCHECK(to_space_.minimum_capacity() == from_space_.minimum_capacity());
368     return to_space_.minimum_capacity();
369   }
370 
371   void VerifyTop();
372 
373   Address original_top_acquire() {
374     return original_top_.load(std::memory_order_acquire);
375   }
376   Address original_limit_relaxed() {
377     return original_limit_.load(std::memory_order_relaxed);
378   }
379 
380   // Return the address of the first allocatable address in the active
381   // semispace. This may be the address where the first object resides.
382   Address first_allocatable_address() { return to_space_.space_start(); }
383 
384   // Get the age mark of the inactive semispace.
385   Address age_mark() { return from_space_.age_mark(); }
386   // Set the age mark in the active semispace.
387   void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
388 
389   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
390   AllocateRaw(int size_in_bytes, AllocationAlignment alignment,
391               AllocationOrigin origin = AllocationOrigin::kRuntime);
392 
393   V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawSynchronized(
394       int size_in_bytes, AllocationAlignment alignment,
395       AllocationOrigin origin = AllocationOrigin::kRuntime);
396 
397   // Reset the allocation pointer to the beginning of the active semispace.
398   void ResetLinearAllocationArea();
399 
400   // When inline allocation stepping is active, either because of incremental
401   // marking, idle scavenge, or allocation statistics gathering, we 'interrupt'
402   // inline allocation every once in a while. This is done by setting
403   // allocation_info_.limit to be lower than the actual limit and and increasing
404   // it in steps to guarantee that the observers are notified periodically.
405   void UpdateInlineAllocationLimit(size_t size_in_bytes) override;
406 
407   inline bool ToSpaceContainsSlow(Address a) const;
408   inline bool ToSpaceContains(Object o) const;
409   inline bool FromSpaceContains(Object o) const;
410 
411   // Try to switch the active semispace to a new, empty, page.
412   // Returns false if this isn't possible or reasonable (i.e., there
413   // are no pages, or the current page is already empty), or true
414   // if successful.
415   bool AddFreshPage();
416   bool AddFreshPageSynchronized();
417 
418 #ifdef VERIFY_HEAP
419   // Verify the active semispace.
420   virtual void Verify(Isolate* isolate);
421 #endif
422 
423 #ifdef DEBUG
424   // Print the active semispace.
425   void Print() override { to_space_.Print(); }
426 #endif
427 
428   // Return whether the operation succeeded.
429   bool CommitFromSpaceIfNeeded() {
430     if (from_space_.is_committed()) return true;
431     return from_space_.Commit();
432   }
433 
434   bool UncommitFromSpace() {
435     if (!from_space_.is_committed()) return true;
436     return from_space_.Uncommit();
437   }
438 
439   bool IsFromSpaceCommitted() { return from_space_.is_committed(); }
440 
441   SemiSpace* active_space() { return &to_space_; }
442 
443   Page* first_page() { return to_space_.first_page(); }
444   Page* last_page() { return to_space_.last_page(); }
445 
446   iterator begin() { return to_space_.begin(); }
447   iterator end() { return to_space_.end(); }
448 
449   const_iterator begin() const { return to_space_.begin(); }
450   const_iterator end() const { return to_space_.end(); }
451 
452   std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;
453 
454   SemiSpace& from_space() { return from_space_; }
455   SemiSpace& to_space() { return to_space_; }
456 
457   void MoveOriginalTopForward() {
458     DCHECK_GE(top(), original_top_);
459     DCHECK_LE(top(), original_limit_);
460     original_top_.store(top(), std::memory_order_release);
461   }
462 
463   void MaybeFreeUnusedLab(LinearAllocationArea info);
464 
465  private:
466   // Update linear allocation area to match the current to-space page.
467   void UpdateLinearAllocationArea();
468 
469   base::Mutex mutex_;
470 
471   // The top and the limit at the time of setting the linear allocation area.
472   // These values can be accessed by background tasks.
473   std::atomic<Address> original_top_;
474   std::atomic<Address> original_limit_;
475 
476   // The semispaces.
477   SemiSpace to_space_;
478   SemiSpace from_space_;
479   VirtualMemory reservation_;
480 
481   // Internal allocation methods.
482   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
483   AllocateFastAligned(int size_in_bytes, int* aligned_size_in_bytes,
484                       AllocationAlignment alignment, AllocationOrigin origin);
485 
486   V8_WARN_UNUSED_RESULT V8_INLINE AllocationResult
487   AllocateFastUnaligned(int size_in_bytes, AllocationOrigin origin);
488 
489   V8_WARN_UNUSED_RESULT AllocationResult
490   AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
491                   AllocationOrigin origin);
492 
493   V8_WARN_UNUSED_RESULT AllocationResult
494   AllocateRawAligned(int size_in_bytes, AllocationAlignment alignment,
495                      AllocationOrigin origin = AllocationOrigin::kRuntime);
496 
497   V8_WARN_UNUSED_RESULT AllocationResult AllocateRawUnaligned(
498       int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);
499 
500   bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment);
501   bool SupportsAllocationObserver() override { return true; }
502 
503   friend class SemiSpaceObjectIterator;
504 };
505 
// For contiguous spaces, top should be in the space (or at the end) and limit
// should be the end of the space.
#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
              (info).top() <= (space).page_high() &&  \
              (info).limit() <= (space).page_high())

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_NEW_SPACES_H_