// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <atomic>
#include <vector>

#include "include/v8-internal.h"
#include "src/heap/base/worklist.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class LargePage;
class MigrationObserver;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk) {
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->address();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell. If there is no next cell, returns 0.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  const MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
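
// Illustrative usage sketch (not part of the original header): walking all
// mark bit cells of a chunk with MarkBitCellIterator. `chunk`, `bitmap`, and
// the ProcessCell helper are assumptions made for this example only.
//
//   for (MarkBitCellIterator it(chunk, bitmap); !it.Done();) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     ProcessCell(it.CurrentCellBase(), *cell);  // hypothetical helper
//     // Advance() is V8_WARN_UNUSED_RESULT; it returns false once the last
//     // cell has been passed.
//     if (!it.Advance()) break;
//   }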

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(const MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    const MemoryChunk* const chunk_;
    Map const one_word_filler_map_;
    Map const two_word_filler_map_;
    Map const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject current_object_;
    int current_size_;
  };

  LiveObjectRange(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {
    DCHECK(!chunk->IsLargePage());
  }

  inline iterator begin();
  inline iterator end();

 private:
  const MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
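
// Illustrative usage sketch (assumption, not V8 code): LiveObjectRange models
// a standard forward range, so marked objects on a chunk can be visited with a
// range-based for loop. `chunk` and `bitmap` are assumed to be available.
//
//   for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, bitmap)) {
//     HeapObject object = object_and_size.first;
//     int size = object_and_size.second;
//     // ... process the live object ...
//   }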

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the visitor returns |false|
  // for an object. If IterationMode::kClearMarkbits is passed, the markbits
  // and slots of each successfully visited object are cleared.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject* failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
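
// Illustrative usage sketch (assumption, not V8 code): visiting all black
// objects on a chunk and clearing their markbits afterwards. `chunk`, `state`,
// and `visitor` are assumed to exist; the failure handling is schematic.
//
//   HeapObject failed_object;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, state, visitor, LiveObjectVisitor::kClearMarkbits,
//           &failed_object)) {
//     // Visitation was aborted at |failed_object|, e.g. because it could not
//     // be processed by the visitor.
//   }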

enum class AlwaysPromoteYoung { kYes, kNo };
enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() = default;

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  explicit MarkCompactCollectorBase(Heap* heap) : heap_(heap) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void DrainMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  // Returns the number of wanted compaction tasks.
  template <class Evacuator, class Collector>
  size_t CreateAndExecuteEvacuationTasks(
      Collector* collector,
      std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
      MigrationObserver* migration_observer);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes,
                      AlwaysPromoteYoung promote_young);

  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items,
      IterateableSpace* space, RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks();

  Heap* heap_;
};
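
// Sketch of the phase ordering implied by the virtual hooks above. This is an
// illustrative outline only, not the actual implementation of any subclass.
//
//   void CollectGarbage() {              // hypothetical subclass override
//     MarkLiveObjects();                 // mark roots and drain the worklist
//     ClearNonLiveReferences();          // drop references to dead objects
//     EvacuatePrologue();
//     EvacuatePagesInParallel();
//     UpdatePointersAfterEvacuation();   // fix up slots to moved objects
//     EvacuateEpilogue();
//   }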

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MinorMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  explicit MinorNonAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_.fetch_add(
        by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_.load(
        std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_.store(value,
                                                   std::memory_order_relaxed);
  }
};

// This is used by marking visitors.
class MajorMarkingState final
    : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MajorMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  // Concurrent marking uses local live bytes so we may do these accesses
  // non-atomically.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  explicit MajorAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by);
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  explicit MajorNonAtomicMarkingState(PtrComprCageBase cage_base)
      : MarkingStateBase(cage_base) {}

  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};
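
// Illustrative usage sketch (assumption, not V8 code): a marking state couples
// mark-bit transitions (inherited from MarkingStateBase, e.g. GreyToBlack as
// used by ShouldVisit() below) with live-byte accounting on the chunk.
// `cage_base` and `object` are assumed to be available.
//
//   MajorMarkingState marking_state(cage_base);
//   if (marking_state.GreyToBlack(object)) {
//     marking_state.IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
//                                      object.Size());
//   }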

// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
class MainMarkingVisitor final
    : public MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                MarkingState> {
 public:
  // This is used for revisiting objects that were black allocated.
  class V8_NODISCARD RevisitScope {
   public:
    explicit RevisitScope(MainMarkingVisitor* visitor) : visitor_(visitor) {
      DCHECK(!visitor->revisiting_object_);
      visitor->revisiting_object_ = true;
    }
    ~RevisitScope() {
      DCHECK(visitor_->revisiting_object_);
      visitor_->revisiting_object_ = false;
    }

   private:
    MainMarkingVisitor<MarkingState>* visitor_;
  };

  MainMarkingVisitor(MarkingState* marking_state,
                     MarkingWorklists::Local* local_marking_worklists,
                     WeakObjects::Local* local_weak_objects, Heap* heap,
                     unsigned mark_compact_epoch,
                     base::EnumSet<CodeFlushMode> code_flush_mode,
                     bool embedder_tracing_enabled,
                     bool should_keep_ages_unchanged)
      : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
            local_marking_worklists, local_weak_objects, heap,
            mark_compact_epoch, code_flush_mode, embedder_tracing_enabled,
            should_keep_ages_unchanged),
        marking_state_(marking_state),
        revisiting_object_(false) {}

  // HeapVisitor override to allow revisiting of black objects.
  bool ShouldVisit(HeapObject object) {
    return marking_state_->GreyToBlack(object) ||
           V8_UNLIKELY(revisiting_object_);
  }

 private:
  // Functions required by MarkingVisitorBase.

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object);

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object);

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target);

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);

  void SynchronizePageAccess(HeapObject heap_object) {
    // Nothing to do on the main thread.
  }

  MarkingState* marking_state() { return marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return (V8_UNLIKELY(FLAG_track_retaining_path))
               ? TraceRetainingPathMode::kEnabled
               : TraceRetainingPathMode::kDisabled;
  }

  MarkingState* const marking_state_;

  friend class MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                  MarkingState>;
  bool revisiting_object_;
};
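
// Illustrative usage sketch (assumption, not V8 code): RevisitScope lets the
// visitor process an already-black object again, since ShouldVisit() normally
// only accepts objects it can transition from grey to black. The Visit call
// and its signature are assumed for this example.
//
//   {
//     MainMarkingVisitor<MarkingState>::RevisitScope scope(visitor);
//     visitor->Visit(object.map(cage_base), object);  // signature assumed
//   }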

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MajorMarkingState;
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  using MarkingVisitor = MainMarkingVisitor<MarkingState>;

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;
  class SharedHeapObjectVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  enum class StartCompactionMode {
    kIncremental,
    kAtomic,
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stops concurrent marking, either by preempting it right away or by
  // waiting for it to complete.
  void FinishConcurrentMarking();

  // Returns whether compaction is running.
  bool StartCompaction(StartCompactionMode mode);

  void AbortCompaction();

  void StartMarking();

  static inline bool IsOnEvacuationCandidate(Object obj) {
    return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
  }

  static bool IsOnEvacuationCandidate(MaybeObject obj);

  struct RecordRelocSlotInfo {
    MemoryChunk* memory_chunk;
    SlotType slot_type;
    uint32_t offset;
  };

  static V8_EXPORT_PRIVATE bool IsMapOrForwardedMap(Map map);

  static bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
                                    HeapObject target);
  static RecordRelocSlotInfo ProcessRelocInfo(Code host, RelocInfo* rinfo,
                                              HeapObject target);

  static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(MemoryChunk* source_page,
                                   HeapObjectSlot slot, HeapObject target);
  void RecordLiveSlotsOnPage(Page* page);

  bool is_compacting() const { return compacting_; }
  bool is_shared_heap() const { return is_shared_heap_; }

  void FinishSweepingIfOutOfWork();

  enum class SweepingForcedFinalizationMode { kUnifiedHeap, kV8Only };

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from the main thread.
  V8_EXPORT_PRIVATE void EnsureSweepingCompleted(
      SweepingForcedFinalizationMode mode);

  void EnsurePageIsSwept(Page* page);

  void DrainSweepingWorklistForSpace(AllocationSpace space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklists* marking_worklists() { return &marking_worklists_; }

  MarkingWorklists::Local* local_marking_worklists() {
    return local_marking_worklists_.get();
  }

  WeakObjects* weak_objects() { return &weak_objects_; }

  WeakObjects::Local* local_weak_objects() { return local_weak_objects_.get(); }

  inline void AddTransitionArray(TransitionArray array);

  void AddNewlyDiscovered(HeapObject object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether a mark-compact collection is currently in progress.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

  base::EnumSet<CodeFlushMode> code_flush_mode() const {
    return code_flush_mode_;
  }

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector() override;

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
  // Used by incremental marking for objects that change their layout.
  void VisitObject(HeapObject obj);
  // Used by incremental marking for black-allocated objects.
  void RevisitObject(HeapObject obj);

  // Drains the main thread marking worklist until the specified number of
  // bytes are processed. If the number of bytes is zero, then the worklist
  // is drained until it is empty.
  template <MarkingWorklistProcessingMode mode =
                MarkingWorklistProcessingMode::kDefault>
  std::pair<size_t, size_t> ProcessMarkingWorklist(size_t bytes_to_process);

 private:
  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // Free unmarked ArrayBufferExtensions.
  void SweepArrayBufferExtensions();

  // Free unmarked entries in the ExternalPointerTable.
  void SweepExternalPointerTable();

  void MarkLiveObjects() override;

  // Marks the object grey and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  // Marks the object grey and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Mark all objects that are directly referenced from one of the clients'
  // heaps.
  void MarkObjectsFromClientHeaps();

  // Updates pointers to shared objects from client heaps.
  void UpdatePointersInClientHeaps();
  void UpdatePointersInClientHeap(Isolate* client);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat the embedded pointers in the code as strong;
  // otherwise they can die and deoptimization of the underlying code may be
  // triggered.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor, Isolate* isolate);

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void DrainMarkingWorklist() override;

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value);

  // Marks ephemerons and drains marking worklist iteratively
  // until a fixpoint is reached. Returns false if too many iterations have been
  // tried and the linear approach should be used.
  bool ProcessEphemeronsUntilFixpoint();

  // Drains ephemeron and marking worklists. Single iteration of the
  // fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map dead_target);
  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

  // Clears bytecode arrays / baseline code that have not been executed for
  // multiple collections.
  void ProcessOldCodeCandidates();
  void ProcessFlushedBaselineCandidates();

  // Resets any JSFunctions which have had their bytecode flushed.
  void ClearFlushedJsFunctions();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Map map, DescriptorArray descriptors);
  void TrimEnumCache(Map map, DescriptorArray descriptors);
  bool CompactTransitionArray(Map map, TransitionArray transitions,
                              DescriptorArray descriptors);
  bool TransitionArrayNeedsCompaction(TransitionArray transitions,
                                      int num_transitions);

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();

  // Goes through the list of encountered JSWeakRefs and WeakCells and clears
  // those with dead values.
  void ClearJSWeakRefs();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  void ReleaseEvacuationCandidates();
  // Returns number of aborted pages.
  size_t PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidateDueToOOM(Address failed_start,
                                                Page* page);
  void ReportAbortedEvacuationCandidateDueToFlags(Address failed_start,
                                                  Page* page);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_{0};

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  const bool is_shared_heap_;

  bool evacuation_ = false;
  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_ = false;
  bool black_allocation_ = false;
  bool have_code_to_deoptimize_ = false;

  MarkingWorklists marking_worklists_;

  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  std::unique_ptr<MarkingVisitor> marking_visitor_;
  std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
  std::unique_ptr<WeakObjects::Local> local_weak_objects_;
  NativeContextInferrer native_context_inferrer_;
  NativeContextStats native_context_stats_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<Address, Page*>>
      aborted_evacuation_candidates_due_to_oom_;
  std::vector<std::pair<Address, Page*>>
      aborted_evacuation_candidates_due_to_flags_;
  std::vector<LargePage*> promoted_large_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  Sweeper* sweeper_;

  // Counts the number of major mark-compact collections. The counter is
  // incremented right after marking. This is used for:
  // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
  //   two bits are used, so it is okay if this counter overflows and wraps
  //   around.
  unsigned epoch_ = 0;

  // Bytecode flushing is disabled when the code coverage mode is changed.
  // Since that can happen while a GC is in progress and code_flush_mode_ must
  // remain the same throughout a GC, we record this mode at the start of each
  // GC.
  base::EnumSet<CodeFlushMode> code_flush_mode_;

  friend class FullEvacuator;
  friend class RecordMigratedSlotVisitor;
};

class V8_NODISCARD EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
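
// Illustrative usage sketch (assumption, not V8 code): EvacuationScope is an
// RAII helper that flags the collector as evacuating for the duration of a
// block, e.g. inside the collector around page evacuation.
//
//   {
//     EvacuationScope evacuation_scope(this);
//     EvacuatePagesInParallel();  // evacuation() returns true in here
//   }
//   // evacuation() returns false again once the scope is destroyed.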

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  static constexpr size_t kMaxParallelTasks = 8;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector() override;

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, FreeSpaceTreatmentMode free_space_mode);
  void CleanupPromotedPages();

 private:
  using MarkingWorklist =
      ::heap::base::Worklist<HeapObject, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject obj);
  void DrainMarkingWorklist() override;
  void TraceFragmentation();
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                          Address start,
                                                          Address end);
  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  int CollectToSpaceUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items);

  void SweepArrayBufferExtensions();

  MarkingWorklist* worklist_;
  MarkingWorklist::Local main_thread_worklist_local_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> promoted_pages_;
  std::vector<LargePage*> promoted_large_pages_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingJob;
  friend class YoungGenerationMarkingVisitor;
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_