// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_PAGED_SPACES_H_
#define V8_HEAP_PAGED_SPACES_H_

#include <memory>
#include <utility>

#include "src/base/bounds.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/platform/mutex.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/allocation-stats.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/spaces.h"

namespace v8 {
namespace internal {

class Heap;
class HeapObject;
class Isolate;
class LocalSpace;
class ObjectVisitor;

// -----------------------------------------------------------------------------
// Heap object iterator in old/map spaces.
//
// A PagedSpaceObjectIterator iterates objects from the bottom of the given
// space to its top or from the bottom of the given page to its top.
//
// If objects are allocated in the page during iteration the iterator may
// or may not iterate over those objects.  The caller must create a new
// iterator in order to be sure to visit these new objects.
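//
// Illustrative usage sketch; the old_space() accessor and the is_null() check
// are assumptions for the example, not requirements imposed by this header:
//
//   PagedSpaceObjectIterator it(heap, heap->old_space());
//   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
//     // Free-space objects and other fillers have already been skipped.
//   }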
class V8_EXPORT_PRIVATE PagedSpaceObjectIterator : public ObjectIterator {
 public:
  // Creates a new object iterator in a given space.
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space);
  PagedSpaceObjectIterator(Heap* heap, PagedSpace* space, Page* page);

  // Advance to the next object, skipping free spaces and other fillers and
  // skipping the special garbage section of which there is one per space.
  // Returns nullptr when the iteration has ended.
  inline HeapObject Next() override;

 private:
  // Fast (inlined) path of next().
  inline HeapObject FromCurrentPage();

  // Slow path of next(), goes into the next page.  Returns false if the
  // iteration has ended.
  bool AdvanceToNextPage();

  Address cur_addr_;  // Current iteration point.
  Address cur_end_;   // End iteration point.
  PagedSpace* space_;
  PageRange page_range_;
  PageRange::iterator current_page_;
};

class V8_EXPORT_PRIVATE PagedSpace
    : NON_EXPORTED_BASE(public SpaceWithLinearArea) {
 public:
  using iterator = PageIterator;
  using const_iterator = ConstPageIterator;

  static const size_t kCompactionMemoryWanted = 500 * KB;

  // Creates a space with an id.
  PagedSpace(Heap* heap, AllocationSpace id, Executability executable,
             FreeList* free_list,
             LocalSpaceKind local_space_kind = LocalSpaceKind::kNone);

  ~PagedSpace() override { TearDown(); }

  // Checks whether an object/address is in this space.
  inline bool Contains(Address a) const;
  inline bool Contains(Object o) const;
  bool ContainsSlow(Address addr) const;

  // Does the space need executable memory?
  Executability executable() { return executable_; }

  // Prepares for a mark-compact GC.
  void PrepareForMarkCompact();

  // Current capacity without growing (Size() + Available()).
  size_t Capacity() { return accounting_stats_.Capacity(); }

  // Approximate amount of physical memory committed for this space.
  size_t CommittedPhysicalMemory() override;

  // Sets the capacity, the available space and the wasted space to zero.
  // The stats are rebuilt during sweeping by adding each page to the
  // capacity and the size when it is encountered.  As free spaces are
  // discovered during the sweeping they are subtracted from the size and added
  // to the available and wasted totals. The free list is cleared as well.
  void ClearAllocatorState() {
    accounting_stats_.ClearSize();
    free_list_->Reset();
  }

  // Available bytes without growing.  These are the bytes on the free list.
  // The bytes in the linear allocation area are not included in this total
  // because updating the stats would slow down allocation.  New pages are
  // immediately added to the free list so they show up here.
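  // For example (illustrative numbers only): a space whose Capacity() is 2 MB
  // and whose Size() is 1.5 MB reports 512 KB here, since Capacity() is
  // defined above as Size() + Available().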
  size_t Available() override;

  // Allocated bytes in this space.  Garbage bytes that were not found due to
  // concurrent sweeping are counted as being allocated!  The bytes in the
  // current linear allocation area (between top and limit) are also counted
  // here.
  size_t Size() override { return accounting_stats_.Size(); }

  // Wasted bytes in this space.  These are just the bytes that were thrown away
  // due to being too small to use for allocation.
  virtual size_t Waste() { return free_list_->wasted_bytes(); }

  // Allocate the requested number of bytes in the space if possible, return a
  // failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawUnaligned(
      int size_in_bytes, AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space double aligned if
  // possible, return a failure object if not.
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRawAligned(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space and consider allocation
  // alignment if needed.
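  // Illustrative caller-side sketch of consuming the AllocationResult; the
  // variable names and the retry strategy are assumptions, not part of this
  // interface:
  //
  //   AllocationResult result = space->AllocateRaw(size, kWordAligned);
  //   HeapObject object;
  //   if (result.To(&object)) {
  //     // Success: |object| lies within this space.
  //   } else {
  //     // Failure: collect garbage, then retry the allocation.
  //   }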
  V8_WARN_UNUSED_RESULT inline AllocationResult AllocateRaw(
      int size_in_bytes, AllocationAlignment alignment,
      AllocationOrigin origin = AllocationOrigin::kRuntime);

  // Allocate the requested number of bytes in the space from a background
  // thread.
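  // Illustrative consumer sketch for the optional result below; local_heap,
  // min, max, and origin are assumed to exist at the call site:
  //
  //   if (auto lab = space->RawRefillLabBackground(local_heap, min, max,
  //                                                kWordAligned, origin)) {
  //     Address base = lab->first;   // start of the refilled area
  //     size_t size = lab->second;   // usable size of the area
  //   }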
  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  RawRefillLabBackground(LocalHeap* local_heap, size_t min_size_in_bytes,
                         size_t max_size_in_bytes,
                         AllocationAlignment alignment,
                         AllocationOrigin origin);

  size_t Free(Address start, size_t size_in_bytes, SpaceAccountingMode mode) {
    if (size_in_bytes == 0) return 0;
    heap()->CreateFillerObjectAtBackground(
        start, static_cast<int>(size_in_bytes),
        ClearFreedMemoryMode::kDontClearFreedMemory);
    if (mode == SpaceAccountingMode::kSpaceAccounted) {
      return AccountedFree(start, size_in_bytes);
    } else {
      return UnaccountedFree(start, size_in_bytes);
    }
  }

  // Gives a block of memory to the space's free list.  It might be added to
  // the free list or accounted as waste.  AccountedFree additionally updates
  // the page's allocated-bytes accounting, whereas UnaccountedFree only
  // returns the memory to the free list and leaves the accounting stats
  // untouched.
  size_t AccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kLinkCategory);
    Page* page = Page::FromAddress(start);
    accounting_stats_.DecreaseAllocatedBytes(size_in_bytes, page);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  size_t UnaccountedFree(Address start, size_t size_in_bytes) {
    size_t wasted = free_list_->Free(start, size_in_bytes, kDoNotLinkCategory);
    DCHECK_GE(size_in_bytes, wasted);
    return size_in_bytes - wasted;
  }

  inline bool TryFreeLast(HeapObject object, int object_size);

  void ResetFreeList();

  // Empty space linear allocation area, returning unused area to free list.
  void FreeLinearAllocationArea();

  void MakeLinearAllocationAreaIterable();

  void MarkLinearAllocationAreaBlack();
  void UnmarkLinearAllocationArea();

  void DecreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.DecreaseAllocatedBytes(bytes, page);
  }
  void IncreaseAllocatedBytes(size_t bytes, Page* page) {
    accounting_stats_.IncreaseAllocatedBytes(bytes, page);
  }
  void DecreaseCapacity(size_t bytes) {
    accounting_stats_.DecreaseCapacity(bytes);
  }
  void IncreaseCapacity(size_t bytes) {
    accounting_stats_.IncreaseCapacity(bytes);
  }

  void RefineAllocatedBytesAfterSweeping(Page* page);

  Page* InitializePage(MemoryChunk* chunk);

  void ReleasePage(Page* page);

  // Adds the page to this space and returns the number of bytes added to the
  // free list of the space.
  size_t AddPage(Page* page);
  void RemovePage(Page* page);
  // Remove a page if it has at least |size_in_bytes| bytes available that can
  // be used for allocation.
  Page* RemovePageSafe(int size_in_bytes);

  void SetReadable();
  void SetReadAndExecutable();
  void SetReadAndWritable();

  void SetDefaultCodePermissions() {
    if (FLAG_jitless) {
      SetReadable();
    } else {
      SetReadAndExecutable();
    }
  }

#ifdef VERIFY_HEAP
  // Verify integrity of this space.
  virtual void Verify(Isolate* isolate, ObjectVisitor* visitor);

  void VerifyLiveBytes();

  // Overridden by subclasses to verify space-specific object
  // properties (e.g., only maps or free-list nodes are in map space).
  virtual void VerifyObject(HeapObject obj) {}
#endif

#ifdef DEBUG
  void VerifyCountersAfterSweeping(Heap* heap);
  void VerifyCountersBeforeConcurrentSweeping();
  // Print meta info and objects in this space.
  void Print() override;

  // Report code object related statistics
  static void ReportCodeStatistics(Isolate* isolate);
  static void ResetCodeStatistics(Isolate* isolate);
#endif

  bool CanExpand(size_t size);

  // Returns the total number of pages in this space.
  int CountTotalPages();

  // Returns the size of the allocatable area on a page in this space.
  inline int AreaSize() { return static_cast<int>(area_size_); }

  bool is_local_space() { return local_space_kind_ != LocalSpaceKind::kNone; }

  bool is_compaction_space() {
    return base::IsInRange(local_space_kind_,
                           LocalSpaceKind::kFirstCompactionSpace,
                           LocalSpaceKind::kLastCompactionSpace);
  }

  LocalSpaceKind local_space_kind() { return local_space_kind_; }

  // Merges {other} into the current space. Note that this modifies {other},
  // e.g., removes its bump pointer area and resets statistics.
  void MergeLocalSpace(LocalSpace* other);

  // Refills the free list from the corresponding free list filled by the
  // sweeper.
  virtual void RefillFreeList();

  base::Mutex* mutex() { return &space_mutex_; }

  inline void UnlinkFreeListCategories(Page* page);
  inline size_t RelinkFreeListCategories(Page* page);

  Page* first_page() { return reinterpret_cast<Page*>(Space::first_page()); }
  const Page* first_page() const {
    return reinterpret_cast<const Page*>(Space::first_page());
  }

  iterator begin() { return iterator(first_page()); }
  iterator end() { return iterator(nullptr); }

  const_iterator begin() const { return const_iterator(first_page()); }
  const_iterator end() const { return const_iterator(nullptr); }

  // Shrink immortal immovable pages of the space to be exactly the size needed
  // using the high water mark.
  void ShrinkImmortalImmovablePages();

  size_t ShrinkPageToHighWaterMark(Page* page);

  std::unique_ptr<ObjectIterator> GetObjectIterator(Heap* heap) override;

  void SetLinearAllocationArea(Address top, Address limit);

 private:
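  // Scoped lock helper for code paths that may be entered by concurrent
  // allocators: it takes the space mutex only when the space supports
  // concurrent allocation (see SupportsConcurrentAllocation() below) and is
  // a no-op otherwise.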
  class ConcurrentAllocationMutex {
   public:
    explicit ConcurrentAllocationMutex(PagedSpace* space) {
      if (space->SupportsConcurrentAllocation()) {
        guard_.emplace(&space->space_mutex_);
      }
    }

    base::Optional<base::MutexGuard> guard_;
  };

  bool SupportsConcurrentAllocation() {
    return FLAG_concurrent_allocation && !is_local_space();
  }

  // Set space linear allocation area.
  void SetTopAndLimit(Address top, Address limit);
  void DecreaseLimit(Address new_limit);
  void UpdateInlineAllocationLimit(size_t min_size) override;
  bool SupportsAllocationObserver() override { return !is_local_space(); }

  // Slow path of allocation function.
  V8_WARN_UNUSED_RESULT AllocationResult
  AllocateRawSlow(int size_in_bytes, AllocationAlignment alignment,
                  AllocationOrigin origin);

 protected:
  // PagedSpaces that should be included in snapshots have different, i.e.,
  // smaller, initial pages.
  virtual bool snapshotable() { return true; }

  bool HasPages() { return first_page() != nullptr; }

  // Cleans up the space, frees all pages in this space except those belonging
  // to the initial chunk, uncommits addresses in the initial chunk.
  void TearDown();

  // Expands the space by allocating a fixed number of pages. Returns nullptr
  // if it cannot allocate the requested number of pages from the OS, or if
  // the hard heap size limit has been hit.
  virtual Page* Expand();
  Page* ExpandBackground(LocalHeap* local_heap);
  Page* AllocatePage();

  // Sets up a linear allocation area that fits the given number of bytes.
  // Returns false if there is not enough space and the caller has to retry
  // after collecting garbage.
  inline bool EnsureLabMain(int size_in_bytes, AllocationOrigin origin);
  // Allocates an object from the linear allocation area. Assumes that the
  // linear allocation area is large enough to fit the object.
  inline AllocationResult AllocateFastUnaligned(int size_in_bytes);
  // Tries to allocate an aligned object from the linear allocation area.
  // Returns a failure result if the linear allocation area does not fit the
  // object.  Otherwise, returns the object and writes the allocation size
  // (object size + alignment filler size) to |aligned_size_in_bytes|.
  inline AllocationResult AllocateFastAligned(int size_in_bytes,
                                              int* aligned_size_in_bytes,
                                              AllocationAlignment alignment);

  V8_WARN_UNUSED_RESULT bool TryAllocationFromFreeListMain(
      size_t size_in_bytes, AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool ContributeToSweepingMain(int required_freed_bytes,
                                                      int max_pages,
                                                      int size_in_bytes,
                                                      AllocationOrigin origin);

  // Refills LAB for EnsureLabMain. This function is space-dependent. Returns
  // false if there is not enough space and the caller has to retry after
  // collecting garbage.
  V8_WARN_UNUSED_RESULT virtual bool RefillLabMain(int size_in_bytes,
                                                   AllocationOrigin origin);

  // Actual implementation of refilling LAB. Returns false if there is not
  // enough space and the caller has to retry after collecting garbage.
  V8_WARN_UNUSED_RESULT bool RawRefillLabMain(int size_in_bytes,
                                              AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT base::Optional<std::pair<Address, size_t>>
  TryAllocationFromFreeListBackground(LocalHeap* local_heap,
                                      size_t min_size_in_bytes,
                                      size_t max_size_in_bytes,
                                      AllocationAlignment alignment,
                                      AllocationOrigin origin);

  V8_WARN_UNUSED_RESULT bool TryExpand(int size_in_bytes,
                                       AllocationOrigin origin);

  Executability executable_;

  LocalSpaceKind local_space_kind_;

  size_t area_size_;

  // Accounting information for this space.
  AllocationStats accounting_stats_;

  // Mutex guarding any concurrent access to the space.
  base::Mutex space_mutex_;

  friend class IncrementalMarking;
  friend class MarkCompactCollector;

  // Used in cctest.
  friend class heap::HeapTester;
};

// -----------------------------------------------------------------------------
// Base class for compaction space and off-thread space.

class V8_EXPORT_PRIVATE LocalSpace : public PagedSpace {
 public:
  LocalSpace(Heap* heap, AllocationSpace id, Executability executable,
             LocalSpaceKind local_space_kind)
      : PagedSpace(heap, id, executable, FreeList::CreateFreeList(),
                   local_space_kind) {
    DCHECK_NE(local_space_kind, LocalSpaceKind::kNone);
  }

  const std::vector<Page*>& GetNewPages() { return new_pages_; }

 protected:
  Page* Expand() override;
  // The space is temporary and not included in any snapshots.
  bool snapshotable() override { return false; }
  // Pages that were allocated in this local space and need to be merged
  // into the main space.
  std::vector<Page*> new_pages_;
};

// -----------------------------------------------------------------------------
// Compaction space that is used temporarily during compaction.

class V8_EXPORT_PRIVATE CompactionSpace : public LocalSpace {
 public:
  CompactionSpace(Heap* heap, AllocationSpace id, Executability executable,
                  LocalSpaceKind local_space_kind)
      : LocalSpace(heap, id, executable, local_space_kind) {
    DCHECK(is_compaction_space());
  }

 protected:
  V8_WARN_UNUSED_RESULT bool RefillLabMain(int size_in_bytes,
                                           AllocationOrigin origin) override;
};

// A collection of |CompactionSpace|s used by a single compaction task.
class CompactionSpaceCollection : public Malloced {
 public:
  explicit CompactionSpaceCollection(Heap* heap,
                                     LocalSpaceKind local_space_kind)
      : old_space_(heap, OLD_SPACE, Executability::NOT_EXECUTABLE,
                   local_space_kind),
        code_space_(heap, CODE_SPACE, Executability::EXECUTABLE,
                    local_space_kind) {}

  CompactionSpace* Get(AllocationSpace space) {
    switch (space) {
      case OLD_SPACE:
        return &old_space_;
      case CODE_SPACE:
        return &code_space_;
      default:
        UNREACHABLE();
    }
    UNREACHABLE();
  }

 private:
  CompactionSpace old_space_;
  CompactionSpace code_space_;
};
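
// Illustrative usage sketch for CompactionSpaceCollection; the specific
// LocalSpaceKind value is an assumption for the example:
//
//   CompactionSpaceCollection compaction_spaces(
//       heap, LocalSpaceKind::kCompactionSpaceForMarkCompact);
//   CompactionSpace* old_space = compaction_spaces.Get(OLD_SPACE);
//   CompactionSpace* code_space = compaction_spaces.Get(CODE_SPACE);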

// -----------------------------------------------------------------------------
// Old generation regular object space.

class OldSpace : public PagedSpace {
 public:
  // Creates an old space object. The constructor does not allocate pages
  // from the OS.
  explicit OldSpace(Heap* heap)
      : PagedSpace(heap, OLD_SPACE, NOT_EXECUTABLE,
                   FreeList::CreateFreeList()) {}

  static bool IsAtPageStart(Address addr) {
    return static_cast<intptr_t>(addr & kPageAlignmentMask) ==
           MemoryChunkLayout::ObjectStartOffsetInDataPage();
  }

  size_t ExternalBackingStoreBytes(ExternalBackingStoreType type) const final {
    if (type == ExternalBackingStoreType::kArrayBuffer)
      return heap()->OldArrayBufferBytes();
    return external_backing_store_bytes_[type];
  }
};

// -----------------------------------------------------------------------------
// Old generation code object space.

class CodeSpace : public PagedSpace {
 public:
  // Creates a code space object. The constructor does not allocate pages
  // from the OS.
  explicit CodeSpace(Heap* heap)
      : PagedSpace(heap, CODE_SPACE, EXECUTABLE, FreeList::CreateFreeList()) {}
};

// -----------------------------------------------------------------------------
// Old space for all map objects.

class MapSpace : public PagedSpace {
 public:
  // Creates a map space object.
  explicit MapSpace(Heap* heap)
      : PagedSpace(heap, MAP_SPACE, NOT_EXECUTABLE,
                   FreeList::CreateFreeList()) {}

  int RoundSizeDownToObjectAlignment(int size) override {
    if (base::bits::IsPowerOfTwo(Map::kSize)) {
      return RoundDown(size, Map::kSize);
    } else {
      return (size / Map::kSize) * Map::kSize;
    }
  }
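  // Worked example for the rounding above, using a hypothetical (non power of
  // two) Map::kSize of 80 bytes: a size of 1000 rounds down to
  // (1000 / 80) * 80 = 960, i.e. room for 12 whole maps.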

  void SortFreeList();

#ifdef VERIFY_HEAP
  void VerifyObject(HeapObject obj) override;
#endif
};

// Iterates over the chunks (pages and large object pages) that can contain
// pointers to new space or to evacuation candidates.
class OldGenerationMemoryChunkIterator {
 public:
  inline explicit OldGenerationMemoryChunkIterator(Heap* heap);

  // Return nullptr when the iterator is done.
  inline MemoryChunk* next();

 private:
  enum State {
    kOldSpaceState,
    kMapState,
    kCodeState,
    kLargeObjectState,
    kCodeLargeObjectState,
    kFinishedState
  };
  Heap* heap_;
  State state_;
  PageIterator old_iterator_;
  PageIterator code_iterator_;
  PageIterator map_iterator_;
  LargePageIterator lo_iterator_;
  LargePageIterator code_lo_iterator_;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_PAGED_SPACES_H_