// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

#include <atomic>
#include <memory>
#include <utility>

#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

class CompactionSpace;
class Heap;
class HeapObject;
class Isolate;
class ObjectVisitor;

// -----------------------------------------------------------------------------
// Heap object iterator in old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration, the iterator may or
// may not iterate over those objects. The caller must create a new iterator
// in order to be sure to visit these new objects.
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);

  // Advances to the next object, skipping free spaces and other fillers, as
  // well as the special garbage section of which there is one per space.
  // Returns a null HeapObject when the iteration has ended.
  inline HeapObject Next() override;

  // The pointer compression cage base value used for decompression of all
  // tagged values except references to Code objects.
  PtrComprCageBase cage_base() const {
#if V8_COMPRESS_POINTERS
    return cage_base_;
#else
    return PtrComprCageBase{};
#endif  // V8_COMPRESS_POINTERS
  }

 private:
  // Fast (inlined) path of Next().
  inline HeapObject FromCurrentPage();

  // Slow path of Next(): advances to the next page. Returns false when the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  const PagedSpace* const space_;
  PageRange page_range_;
  PageRange::iterator current_page_;
#if V8_COMPRESS_POINTERS
  const PtrComprCageBase cage_base_;
#endif  // V8_COMPRESS_POINTERS
};
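
// Illustrative usage sketch, assuming a Heap* |heap| with an old space;
// Next() yields a null object once the iteration is exhausted:
//
//   PagedSpaceObjectIterator it(heap, heap->old_space());
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // ... visit |obj| ...
//   }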

class V8_EXPORT_PRIVATE PagedSpace
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(
      Heap* heap, AllocationSpace id, Executability executable,
      FreeList* free_list, LinearAllocationArea* allocation_info_,
      CompactionSpaceKind compaction_space_kind = CompactionSpaceKind::kNone);

  ~PagedSpace() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a) const;
  inline bool Contains(Object o) const;
  bool ContainsSlow(Address addr) const;

  // Does the space need executable memory?
  Executability executable() const { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() const { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() const override;

#if DEBUG
  void VerifyCommittedPhysicalMemory() const;
#endif  // DEBUG

  void IncrementCommittedPhysicalMemory(size_t increment_value);
  void DecrementCommittedPhysicalMemory(size_t decrement_value);

  // Sets the capacity, the available space, and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the capacity
  // and the size when it is encountered. As free spaces are discovered during
  // sweeping, they are subtracted from the size and added to the available
  // and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    free_list_->Reset();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
  size_t Available() const override;

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() const override { return accounting_stats_.Size(); }

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() const { return free_list_->wasted_bytes(); }

  // Allocate the requested number of bytes in the space from a background
  // thread.
  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
                         size_t max_size_in_bytes,
                         AllocationAlignment alignment,
                         AllocationOrigin origin);

  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAtBackground(
        start, static_cast<int>(size_in_bytes),
        ClearFreedMemoryMode::kDontClearFreedMemory);
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }
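
  // Illustrative sketch (assumes |top| and |limit| bound a no-longer-needed
  // linear allocation area): returning that memory to the space with
  // accounting would look like
  //   space->Free(top, limit - top, SpaceAccountingMode::kSpaceAccounted);
  // The return value is the number of usable bytes added to the free list.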

  // Gives a block of memory to the space's free list. It might be added to
  // the free list or accounted as waste (e.g., if it is too small to be
  // usable). Returns the number of usable (non-wasted) bytes.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  inline bool TryFreeLast(Address object_address, int object_size);

  void ResetFreeList();

  // Empties the linear allocation area, returning the unused area to the
  // free list.
  void FreeLinearAllocationArea() override;

  void MakeLinearAllocationAreaIterable();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  Page* InitializePage(MemoryChunk* chunk) override;

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadable();
  void SetReadAndExecutable();
  void SetCodeModificationPermissions();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject obj) const {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap);
  void VerifyCountersBeforeConcurrentSweeping();
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size) const;

  // Returns the total number of pages in this space.
  int CountTotalPages() const;

  // Returns the size of the allocatable area on a page in this space.
  inline int AreaSize() const { return static_cast<int>(area_size_); }

  bool is_compaction_space() const {
    return compaction_space_kind_ != CompactionSpaceKind::kNone;
  }

  CompactionSpaceKind compaction_space_kind() const {
    return compaction_space_kind_;
  }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeCompactionSpace(CompactionSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() override {
    return reinterpret_cast<Page*>(memory_chunk_list_.front());
  }
  const Page* first_page() const override {
    return reinterpret_cast<const Page*>(memory_chunk_list_.front());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }
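
  // Illustrative sketch: begin()/end() expose the pages of the space, so all
  // pages can be walked with a range-based for loop, e.g.
  //   for (Page* page : *space) { /* ... inspect |page| ... */ }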

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void SetLinearAllocationArea(Address top, Address limit);

  Address original_top() const { return original_top_; }

  Address original_limit() const { return original_limit_; }

  void MoveOriginalTopForward() {
    base::SharedMutexGuard<base::kExclusive> guard(&pending_allocation_mutex_);
    DCHECK_GE(top(), original_top_);
    DCHECK_LE(top(), original_limit_);
    original_top_ = top();
  }

  base::SharedMutex* pending_allocation_mutex() {
    return &pending_allocation_mutex_;
  }

  void AddRangeToActiveSystemPages(Page* page, Address start, Address end);
  void ReduceActiveSystemPages(Page* page,
                               ActiveSystemPages active_system_pages);

 private:
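  // RAII helper that acquires |space_mutex_| only when the space supports
  // concurrent allocation (i.e., it is not a compaction space, which is used
  // by a single compaction task and needs no locking).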
  class ConcurrentAllocationMutex {
   public:
    explicit ConcurrentAllocationMutex(const PagedSpace* space) {
      if (space->SupportsConcurrentAllocation()) {
        guard_.emplace(&space->space_mutex_);
      }
    }

    base::Optional<base::MutexGuard> guard_;
  };

  bool SupportsConcurrentAllocation() const { return !is_compaction_space(); }

  // Set space linear allocation area.
  void SetTopAndLimit(Address top, Address limit);
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsAllocationObserver() const override {
    return !is_compaction_space();
  }

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() const { return true; }

  bool HasPages() const { return first_page() != nullptr; }

  // Returns whether sweeping of this space is safe on this thread. Code space
  // sweeping is only allowed on the main thread.
  bool IsSweepingAllowedOnThread(LocalHeap* local_heap) const;

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns nullptr
  // if it cannot allocate the requested number of pages from the OS, or if
  // the hard heap size limit has been hit.
  virtual Page* Expand();

  // Expands the space by a single page from a background thread and allocates
  // a memory area of the given size in it. If successful the method returns
  // the address and size of the area.
  base::Optional<std::pair<Address, size_t>> ExpandBackground(
      size_t size_in_bytes);

  bool EnsureAllocation(int size_in_bytes, AllocationAlignment alignment,
                        AllocationOrigin origin,
                        int* out_max_aligned_size) final;

  V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
      size_t size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                      int max_pages,
                                                      int size_in_bytes,
                                                      AllocationOrigin origin);

  // Refills LAB for EnsureLabMain. This function is space-dependent. Returns
  // false if there is not enough space and the caller has to retry after
  // collecting garbage.
  V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
                                                   AllocationOrigin origin);

  // Actual implementation of refilling LAB. Returns false if there is not
  // enough space and the caller has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
                                              AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  TryAllocationFromFreeListBackground(size_t min_size_in_bytes,
                                      size_t max_size_in_bytes,
                                      AllocationAlignment alignment,
                                      AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
                                       AllocationOrigin origin);

  size_t committed_physical_memory() const {
    return committed_physical_memory_.load(std::memory_order_relaxed);
  }

  Executability executable_;

  CompactionSpaceKind compaction_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  mutable base::Mutex space_mutex_;

  // The top and the limit at the time of setting the linear allocation area.
  // These values are protected by pending_allocation_mutex_.
  Address original_top_;
  Address original_limit_;

  // Protects original_top_ and original_limit_.
  base::SharedMutex pending_allocation_mutex_;

  std::atomic<size_t> committed_physical_memory_{0};

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public PagedSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                  CompactionSpaceKind compaction_space_kind)
      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
                   &allocation_info_, compaction_space_kind) {
    DCHECK(is_compaction_space());
  }

  const std::vector<Page*>& GetNewPages() { return new_pages_; }

 private:
  LinearAllocationArea allocation_info_;

 protected:
  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
                                           AllocationOrigin origin) override;

  Page* Expand() override;
  // The space is temporary and not included in any snapshots.
  bool snapshotable() const override { return false; }
  // Pages that were allocated in this local space and need to be merged
  // to the main space.
  std::vector<Page*> new_pages_;
};

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap,
                                     CompactionSpaceKind compaction_space_kind)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        map_space_(heap, MAP_SPACE, Executability::NOT_EXECUTABLE,
                   compaction_space_kind),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
                    compaction_space_kind) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case MAP_SPACE:
        return &map_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace map_space_;
  CompactionSpace code_space_;
};

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from OS.
  explicit OldSpace(Heap* heap, LinearAllocationArea* allocation_info)
      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   allocation_info) {}

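  // Returns true when |addr| is the first object address on a data page,
  // i.e., its offset within the page equals
  // MemoryChunkLayout::ObjectStartOffsetInDataPage().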
  static bool IsAtPageStart(Address addr) {
    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
           MemoryChunkLayout::ObjectStartOffsetInDataPage();
  }

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->OldArrayBufferBytes();
    return external_backing_store_bytes_[type];
  }
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from OS.
  explicit CodeSpace(Heap* heap)
      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

 private:
  LinearAllocationArea paged_allocation_info_;
};

// -----------------------------------------------------------------------------
// Old space for all map objects

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  explicit MapSpace(Heap* heap)
      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE, FreeList::CreateFreeList(),
                   &paged_allocation_info_) {}

  int RoundSizeDownToObjectAlignment(int size) const override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
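
  // Worked example (illustrative values): if Map::kSize were 88, which is not
  // a power of two, RoundSizeDownToObjectAlignment(1000) == (1000 / 88) * 88
  // == 968; if it were a power of two such as 64, the faster RoundDown path
  // yields RoundDown(1000, 64) == 960.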

  void SortFreeList();

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject obj) const override;
#endif

 private:
  LinearAllocationArea paged_allocation_info_;
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
 public:
  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kCodeLargeObjectState,
    kFinishedState
  };
  Heap* const heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
  LargePageIterator code_lo_iterator_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PAGED_SPACES_H_