// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_MARK_COMPACT_H_
#define V8_HEAP_MARK_COMPACT_H_

#include <atomic>
#include <vector>

#include "src/heap/concurrent-marking.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/marking-worklist.h"
#include "src/heap/marking.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/spaces.h"
#include "src/heap/sweeper.h"

namespace v8 {
namespace internal {

// Forward declarations.
class EvacuationJobTraits;
class HeapObjectVisitor;
class ItemParallelJob;
class MigrationObserver;
class ReadOnlySpace;
class RecordMigratedSlotVisitor;
class UpdatingItem;
class YoungGenerationMarkingVisitor;

class MarkBitCellIterator {
 public:
  MarkBitCellIterator(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk) {
    last_cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
    cell_base_ = chunk_->address();
    cell_index_ =
        Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
    cells_ = bitmap->cells();
  }

  inline bool Done() { return cell_index_ >= last_cell_index_; }

  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }

  inline MarkBit::CellType* CurrentCell() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return &cells_[cell_index_];
  }

  inline Address CurrentCellBase() {
    DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
                               chunk_->AddressToMarkbitIndex(cell_base_))));
    return cell_base_;
  }

  V8_WARN_UNUSED_RESULT inline bool Advance() {
    cell_base_ += Bitmap::kBitsPerCell * kTaggedSize;
    return ++cell_index_ != last_cell_index_;
  }

  inline bool Advance(unsigned int new_cell_index) {
    if (new_cell_index != cell_index_) {
      DCHECK_GT(new_cell_index, cell_index_);
      DCHECK_LE(new_cell_index, last_cell_index_);
      unsigned int diff = new_cell_index - cell_index_;
      cell_index_ = new_cell_index;
      cell_base_ += diff * (Bitmap::kBitsPerCell * kTaggedSize);
      return true;
    }
    return false;
  }

  // Returns the next mark bit cell, or 0 if there is no next cell.
  inline MarkBit::CellType PeekNext() {
    if (HasNext()) {
      return cells_[cell_index_ + 1];
    }
    return 0;
  }

 private:
  const MemoryChunk* chunk_;
  MarkBit::CellType* cells_;
  unsigned int last_cell_index_;
  unsigned int cell_index_;
  Address cell_base_;
};
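
// Illustrative usage sketch (not part of the original header): walking the
// mark bit cells of a chunk with MarkBitCellIterator. |chunk| and |bitmap|
// are assumed to be supplied by the caller, e.g. the chunk's marking bitmap.
//
//   MarkBitCellIterator it(chunk, bitmap);
//   while (!it.Done()) {
//     MarkBit::CellType* cell = it.CurrentCell();
//     Address base = it.CurrentCellBase();
//     // Each cell holds Bitmap::kBitsPerCell mark bits covering the tagged
//     // words starting at |base|.
//     if (!it.Advance()) break;
//   }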

enum LiveObjectIterationMode { kBlackObjects, kGreyObjects, kAllLiveObjects };

template <LiveObjectIterationMode mode>
class LiveObjectRange {
 public:
  class iterator {
   public:
    using value_type = std::pair<HeapObject, int /* size */>;
    using pointer = const value_type*;
    using reference = const value_type&;
    using iterator_category = std::forward_iterator_tag;

    inline iterator(const MemoryChunk* chunk, Bitmap* bitmap, Address start);

    inline iterator& operator++();
    inline iterator operator++(int);

    bool operator==(iterator other) const {
      return current_object_ == other.current_object_;
    }

    bool operator!=(iterator other) const { return !(*this == other); }

    value_type operator*() {
      return std::make_pair(current_object_, current_size_);
    }

   private:
    inline void AdvanceToNextValidObject();

    const MemoryChunk* const chunk_;
    Map const one_word_filler_map_;
    Map const two_word_filler_map_;
    Map const free_space_map_;
    MarkBitCellIterator it_;
    Address cell_base_;
    MarkBit::CellType current_cell_;
    HeapObject current_object_;
    int current_size_;
  };

  LiveObjectRange(const MemoryChunk* chunk, Bitmap* bitmap)
      : chunk_(chunk),
        bitmap_(bitmap),
        start_(chunk_->area_start()),
        end_(chunk->area_end()) {
    DCHECK(!chunk->IsLargePage());
  }

  inline iterator begin();
  inline iterator end();

 private:
  const MemoryChunk* const chunk_;
  Bitmap* bitmap_;
  Address start_;
  Address end_;
};
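
// Illustrative usage sketch (not part of the original header): iterating the
// black objects of a page together with their sizes. |chunk| and
// |marking_state| are assumed to come from the caller.
//
//   for (auto object_and_size : LiveObjectRange<kBlackObjects>(
//            chunk, marking_state->bitmap(chunk))) {
//     HeapObject object = object_and_size.first;
//     int size = object_and_size.second;
//     // ... process |object| ...
//   }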

class LiveObjectVisitor : AllStatic {
 public:
  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  // Visits black objects on a MemoryChunk until the Visitor returns |false|
  // for an object. If IterationMode::kClearMarkbits is passed, the markbits
  // and slots of each successfully visited object are cleared.
  template <class Visitor, typename MarkingState>
  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
                                Visitor* visitor, IterationMode iteration_mode,
                                HeapObject* failed_object);

  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                      Visitor* visitor,
                                      IterationMode iteration_mode);

  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
  // visitation for an object.
  template <class Visitor, typename MarkingState>
  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
                                     Visitor* visitor,
                                     IterationMode iteration_mode);

  template <typename MarkingState>
  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
};
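
// Illustrative usage sketch (not part of the original header): evacuating the
// black objects of a page and clearing their markbits, aborting if the
// visitor fails for some object. |chunk|, |state|, and |visitor| are assumed
// to be supplied by the caller.
//
//   HeapObject failed_object;
//   if (!LiveObjectVisitor::VisitBlackObjects(
//           chunk, state, &visitor, LiveObjectVisitor::kClearMarkbits,
//           &failed_object)) {
//     // |failed_object| could not be visited; the page needs special
//     // treatment (cf. ReportAbortedEvacuationCandidate below).
//   }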

enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
enum MarkingTreatmentMode { KEEP, CLEAR };
enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };

// Base class for minor and full MC collectors.
class MarkCompactCollectorBase {
 public:
  virtual ~MarkCompactCollectorBase() = default;

  virtual void SetUp() = 0;
  virtual void TearDown() = 0;
  virtual void CollectGarbage() = 0;

  inline Heap* heap() const { return heap_; }
  inline Isolate* isolate();

 protected:
  explicit MarkCompactCollectorBase(Heap* heap)
      : heap_(heap), old_to_new_slots_(0) {}

  // Marking operations for objects reachable from roots.
  virtual void MarkLiveObjects() = 0;
  // Mark objects reachable (transitively) from objects in the marking
  // work list.
  virtual void DrainMarkingWorklist() = 0;
  // Clear non-live references held in side data structures.
  virtual void ClearNonLiveReferences() = 0;
  virtual void EvacuatePrologue() = 0;
  virtual void EvacuateEpilogue() = 0;
  virtual void Evacuate() = 0;
  virtual void EvacuatePagesInParallel() = 0;
  virtual void UpdatePointersAfterEvacuation() = 0;
  virtual std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(
      MemoryChunk* chunk, Address start, Address end) = 0;
  virtual std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;

  template <class Evacuator, class Collector>
  void CreateAndExecuteEvacuationTasks(
      Collector* collector,
      std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
      MigrationObserver* migration_observer, const intptr_t live_bytes);

  // Returns whether this page should be moved according to heuristics.
  bool ShouldMovePage(Page* p, intptr_t live_bytes, bool promote_young);

  int CollectToSpaceUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items);
  template <typename IterateableSpace>
  int CollectRememberedSetUpdatingItems(
      std::vector<std::unique_ptr<UpdatingItem>>* items,
      IterateableSpace* space, RememberedSetUpdatingMode mode);

  int NumberOfParallelCompactionTasks();

  Heap* heap_;
  // Number of old to new slots. Should be computed during MarkLiveObjects.
  // -1 indicates that the value couldn't be computed.
  int old_to_new_slots_;
};

class MinorMarkingState final
    : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_ += by;
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_;
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_ = value;
  }
};

class MinorNonAtomicMarkingState final
    : public MarkingStateBase<MinorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return MemoryChunk::cast(chunk)
        ->young_generation_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->young_generation_live_byte_count_.fetch_add(
        by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->young_generation_live_byte_count_.load(
        std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->young_generation_live_byte_count_.store(value,
                                                   std::memory_order_relaxed);
  }
};

// This is used by marking visitors.
class MajorMarkingState final
    : public MarkingStateBase<MajorMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  // Concurrent marking uses local live bytes so we may do these accesses
  // non-atomically.
  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};

// This is used by Scavenger and Evacuator in TransferColor.
// Live byte increments have to be atomic.
class MajorAtomicMarkingState final
    : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by);
  }
};

class MajorNonAtomicMarkingState final
    : public MarkingStateBase<MajorNonAtomicMarkingState,
                              AccessMode::NON_ATOMIC> {
 public:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const BasicMemoryChunk* chunk) const {
    return chunk->marking_bitmap<AccessMode::NON_ATOMIC>();
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    chunk->live_byte_count_.fetch_add(by, std::memory_order_relaxed);
  }

  intptr_t live_bytes(MemoryChunk* chunk) const {
    return chunk->live_byte_count_.load(std::memory_order_relaxed);
  }

  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
    chunk->live_byte_count_.store(value, std::memory_order_relaxed);
  }
};
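
// Illustrative sketch (not part of the original header): how a marking state
// is typically used to mark an object and account for its live bytes.
// WhiteToBlack() is assumed to be provided by MarkingStateBase.
//
//   if (marking_state->WhiteToBlack(object)) {
//     marking_state->IncrementLiveBytes(MemoryChunk::FromHeapObject(object),
//                                       object.Size());
//   }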

// This visitor is used for marking on the main thread. It is cheaper than
// the concurrent marking visitor because it does not snapshot JSObjects.
template <typename MarkingState>
class MainMarkingVisitor final
    : public MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                MarkingState> {
 public:
  // This is used for revisiting objects that were black allocated.
  class RevisitScope {
   public:
    explicit RevisitScope(MainMarkingVisitor* visitor) : visitor_(visitor) {
      DCHECK(!visitor->revisiting_object_);
      visitor->revisiting_object_ = true;
    }
    ~RevisitScope() {
      DCHECK(visitor_->revisiting_object_);
      visitor_->revisiting_object_ = false;
    }

   private:
    MainMarkingVisitor<MarkingState>* visitor_;
  };

  MainMarkingVisitor(MarkingState* marking_state,
                     MarkingWorklists::Local* local_marking_worklists,
                     WeakObjects* weak_objects, Heap* heap,
                     unsigned mark_compact_epoch,
                     BytecodeFlushMode bytecode_flush_mode,
                     bool embedder_tracing_enabled, bool is_forced_gc)
      : MarkingVisitorBase<MainMarkingVisitor<MarkingState>, MarkingState>(
            kMainThreadTask, local_marking_worklists, weak_objects, heap,
            mark_compact_epoch, bytecode_flush_mode, embedder_tracing_enabled,
            is_forced_gc),
        marking_state_(marking_state),
        revisiting_object_(false) {}

  // HeapVisitor override to allow revisiting of black objects.
  bool ShouldVisit(HeapObject object) {
    return marking_state_->GreyToBlack(object) ||
           V8_UNLIKELY(revisiting_object_);
  }

  void MarkDescriptorArrayFromWriteBarrier(DescriptorArray descriptors,
                                           int number_of_own_descriptors);

 private:
  // Functions required by MarkingVisitorBase.

  template <typename T, typename TBodyDescriptor = typename T::BodyDescriptor>
  int VisitJSObjectSubclass(Map map, T object);

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object);

  template <typename TSlot>
  void RecordSlot(HeapObject object, TSlot slot, HeapObject target);

  void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);

  void SynchronizePageAccess(HeapObject heap_object) {
    // Nothing to do on the main thread.
  }

  MarkingState* marking_state() { return marking_state_; }

  TraceRetainingPathMode retaining_path_mode() {
    return (V8_UNLIKELY(FLAG_track_retaining_path))
               ? TraceRetainingPathMode::kEnabled
               : TraceRetainingPathMode::kDisabled;
  }

  MarkingState* const marking_state_;

  friend class MarkingVisitorBase<MainMarkingVisitor<MarkingState>,
                                  MarkingState>;
  bool revisiting_object_;
};
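
// Illustrative sketch (not part of the original header): revisiting a black-
// allocated object whose layout changed. Without RevisitScope, ShouldVisit()
// would bail out because GreyToBlack() fails on an already-black object.
// Visit() is assumed to be inherited from the HeapVisitor machinery.
//
//   MainMarkingVisitor<MarkingState>::RevisitScope scope(visitor);
//   visitor->Visit(object.map(), object);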

// Collector for young and old generation.
class MarkCompactCollector final : public MarkCompactCollectorBase {
 public:
#ifdef V8_ATOMIC_MARKING_STATE
  using MarkingState = MajorMarkingState;
#else
  using MarkingState = MajorNonAtomicMarkingState;
#endif  // V8_ATOMIC_MARKING_STATE
  using AtomicMarkingState = MajorAtomicMarkingState;
  using NonAtomicMarkingState = MajorNonAtomicMarkingState;

  using MarkingVisitor = MainMarkingVisitor<MarkingState>;

  class RootMarkingVisitor;
  class CustomRootBodyMarkingVisitor;

  enum IterationMode {
    kKeepMarking,
    kClearMarkbits,
  };

  enum class MarkingWorklistProcessingMode {
    kDefault,
    kTrackNewlyDiscoveredObjects
  };

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  // Performs a global garbage collection.
  void CollectGarbage() override;

  void CollectEvacuationCandidates(PagedSpace* space);

  void AddEvacuationCandidate(Page* p);

  // Prepares for GC by resetting relocation info in old and map spaces and
  // choosing spaces to compact.
  void Prepare();

  // Stops concurrent marking, either by preempting it right away or by
  // waiting for it to complete.
  void FinishConcurrentMarking();

  bool StartCompaction();

  void AbortCompaction();

  void StartMarking();

  static inline bool IsOnEvacuationCandidate(Object obj) {
    return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
  }

  static bool IsOnEvacuationCandidate(MaybeObject obj);

  struct RecordRelocSlotInfo {
    MemoryChunk* memory_chunk;
    SlotType slot_type;
    bool should_record;
    uint32_t offset;
  };
  static RecordRelocSlotInfo PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
                                                    HeapObject target);
  static void RecordRelocSlot(Code host, RelocInfo* rinfo, HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, ObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(HeapObject object, HeapObjectSlot slot,
                                   HeapObject target);
  V8_INLINE static void RecordSlot(MemoryChunk* source_page,
                                   HeapObjectSlot slot, HeapObject target);
  void RecordLiveSlotsOnPage(Page* page);

  bool is_compacting() const { return compacting_; }

  // Ensures that sweeping is finished.
  //
  // Note: Can only be called safely from main thread.
  V8_EXPORT_PRIVATE void EnsureSweepingCompleted();

  void DrainSweepingWorklists();
  void DrainSweepingWorklistForSpace(AllocationSpace space);

  // Checks if sweeping is in progress right now on any space.
  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }

  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }

  bool evacuation() const { return evacuation_; }

  MarkingWorklists* marking_worklists() { return &marking_worklists_; }

  MarkingWorklists::Local* local_marking_worklists() {
    return local_marking_worklists_.get();
  }

  WeakObjects* weak_objects() { return &weak_objects_; }

  inline void AddTransitionArray(TransitionArray array);

  void AddNewlyDiscovered(HeapObject object) {
    if (ephemeron_marking_.newly_discovered_overflowed) return;

    if (ephemeron_marking_.newly_discovered.size() <
        ephemeron_marking_.newly_discovered_limit) {
      ephemeron_marking_.newly_discovered.push_back(object);
    } else {
      ephemeron_marking_.newly_discovered_overflowed = true;
    }
  }

  void ResetNewlyDiscovered() {
    ephemeron_marking_.newly_discovered_overflowed = false;
    ephemeron_marking_.newly_discovered.clear();
  }

  Sweeper* sweeper() { return sweeper_; }

#ifdef DEBUG
  // Checks whether the collector is performing a mark-compact collection.
  bool in_use() { return state_ > PREPARE_GC; }
  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
#endif

  void VerifyMarking();
#ifdef VERIFY_HEAP
  void VerifyMarkbitsAreClean();
  void VerifyMarkbitsAreDirty(ReadOnlySpace* space);
  void VerifyMarkbitsAreClean(PagedSpace* space);
  void VerifyMarkbitsAreClean(NewSpace* space);
  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
#endif

  unsigned epoch() const { return epoch_; }

  explicit MarkCompactCollector(Heap* heap);
  ~MarkCompactCollector() override;

  // Used by wrapper tracing.
  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
  // Used by incremental marking for objects that change their layout.
  void VisitObject(HeapObject obj);
  // Used by incremental marking for black-allocated objects.
  void RevisitObject(HeapObject obj);
  // Ensures that all descriptors in the range [0, number_of_own_descriptors)
  // are visited.
  void MarkDescriptorArrayFromWriteBarrier(DescriptorArray array,
                                           int number_of_own_descriptors);

  // Drains the main thread marking worklist until the specified number of
  // bytes are processed. If the number of bytes is zero, then the worklist
  // is drained until it is empty.
  template <MarkingWorklistProcessingMode mode =
                MarkingWorklistProcessingMode::kDefault>
  size_t ProcessMarkingWorklist(size_t bytes_to_process);

 private:
  void ComputeEvacuationHeuristics(size_t area_size,
                                   int* target_fragmentation_percent,
                                   size_t* max_evacuated_bytes);

  void RecordObjectStats();

  // Finishes GC, performs heap verification if enabled.
  void Finish();

  // Free unmarked ArrayBufferExtensions.
  void SweepArrayBufferExtensions();

  void MarkLiveObjects() override;

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkObject(HeapObject host, HeapObject obj);

  // Marks the object black and adds it to the marking work list.
  // This is for non-incremental marking only.
  V8_INLINE void MarkRootObject(Root root, HeapObject obj);

  // Mark the heap roots and all objects reachable from them.
  void MarkRoots(RootVisitor* root_visitor,
                 ObjectVisitor* custom_root_body_visitor);

  // Marks objects reachable from harmony weak maps and wrapper tracing.
  void ProcessEphemeronMarking();

  // If the call-site of the top optimized code was not prepared for
  // deoptimization, then treat embedded pointers in the code as strong, as
  // they could otherwise die and trigger deoptimization of the underlying
  // code.
  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);

  // Drains the main thread marking work list. Will mark all pending objects
  // if no concurrent threads are running.
  void DrainMarkingWorklist() override;

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool ProcessEphemeron(HeapObject key, HeapObject value);

  // Marks ephemerons and drains marking worklist iteratively
  // until a fixpoint is reached.
  void ProcessEphemeronsUntilFixpoint();

  // Drains ephemeron and marking worklists. Performs a single iteration of
  // the fixpoint iteration.
  bool ProcessEphemerons();

  // Mark ephemerons and drain marking worklist with a linear algorithm.
  // Only used if fixpoint iteration doesn't finish within a few iterations.
  void ProcessEphemeronsLinear();

  // Perform Wrapper Tracing if in use.
  void PerformWrapperTracing();

  // Callback function for telling whether the object *p is an unmarked
  // heap object.
  static bool IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p);

  // Clear non-live references in weak cells, transition and descriptor arrays,
  // and deoptimize dependent code of non-live maps.
  void ClearNonLiveReferences() override;
  void MarkDependentCodeForDeoptimization();
  // Checks if the given weak cell is a simple transition from the parent map
  // of the given dead target. If so it clears the transition and trims
  // the descriptor array of the parent if needed.
  void ClearPotentialSimpleMapTransition(Map dead_target);
  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);

  // Flushes a weakly held bytecode array from a shared function info.
  void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);

  // Clears bytecode arrays that have not been executed for multiple
  // collections.
  void ClearOldBytecodeCandidates();

  // Resets any JSFunctions which have had their bytecode flushed.
  void ClearFlushedJsFunctions();

  // Compact every array in the global list of transition arrays and
  // trim the corresponding descriptor array if a transition target is non-live.
  void ClearFullMapTransitions();
  void TrimDescriptorArray(Map map, DescriptorArray descriptors);
  void TrimEnumCache(Map map, DescriptorArray descriptors);
  bool CompactTransitionArray(Map map, TransitionArray transitions,
                              DescriptorArray descriptors);

  // After all reachable objects have been marked, those weak map entries
  // with an unreachable key are removed from all encountered weak maps.
  // The linked list of all encountered weak maps is destroyed.
  void ClearWeakCollections();

  // Goes through the list of encountered weak references and clears those with
  // dead values. If the value is a dead map and the parent map transitions to
  // the dead map via weak cell, then this function also clears the map
  // transition.
  void ClearWeakReferences();

  // Goes through the list of encountered JSWeakRefs and WeakCells and clears
  // those with dead values.
  void ClearJSWeakRefs();

  void AbortWeakObjects();

  // Starts sweeping of spaces by contributing on the main thread and setting
  // up other pages for sweeping. Does not start sweeper tasks.
  void StartSweepSpaces();
  void StartSweepSpace(PagedSpace* space);

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                          Address start,
                                                          Address end) override;
  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  void ReleaseEvacuationCandidates();
  void PostProcessEvacuationCandidates();
  void ReportAbortedEvacuationCandidate(HeapObject failed_object,
                                        MemoryChunk* chunk);

  static const int kEphemeronChunkSize = 8 * KB;

  int NumberOfParallelEphemeronVisitingTasks(size_t elements);

  void RightTrimDescriptorArray(DescriptorArray array, int descriptors_to_trim);

  base::Mutex mutex_;
  base::Semaphore page_parallel_job_semaphore_;

#ifdef DEBUG
  enum CollectorState {
    IDLE,
    PREPARE_GC,
    MARK_LIVE_OBJECTS,
    SWEEP_SPACES,
    ENCODE_FORWARDING_ADDRESSES,
    UPDATE_POINTERS,
    RELOCATE_OBJECTS
  };

  // The current stage of the collector.
  CollectorState state_;
#endif

  bool was_marked_incrementally_;

  bool evacuation_;

  // True if we are collecting slots to perform evacuation from evacuation
  // candidates.
  bool compacting_;

  bool black_allocation_;

  bool have_code_to_deoptimize_;

  MarkingWorklists marking_worklists_;

  WeakObjects weak_objects_;
  EphemeronMarking ephemeron_marking_;

  std::unique_ptr<MarkingVisitor> marking_visitor_;
  std::unique_ptr<MarkingWorklists::Local> local_marking_worklists_;
  NativeContextInferrer native_context_inferrer_;
  NativeContextStats native_context_stats_;

  // Candidates for pages that should be evacuated.
  std::vector<Page*> evacuation_candidates_;
  // Pages that are actually processed during evacuation.
  std::vector<Page*> old_space_evacuation_pages_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<std::pair<HeapObject, Page*>> aborted_evacuation_candidates_;

  Sweeper* sweeper_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  // Counts the number of major mark-compact collections. The counter is
  // incremented right after marking. This is used for:
  // - marking descriptor arrays. See NumberOfMarkedDescriptors. Only the lower
  //   two bits are used, so it is okay if this counter overflows and wraps
  //   around.
  unsigned epoch_ = 0;

  friend class FullEvacuator;
  friend class RecordMigratedSlotVisitor;
};

class EvacuationScope {
 public:
  explicit EvacuationScope(MarkCompactCollector* collector)
      : collector_(collector) {
    collector_->set_evacuation(true);
  }

  ~EvacuationScope() { collector_->set_evacuation(false); }

 private:
  MarkCompactCollector* collector_;
};
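
// Illustrative sketch (not part of the original header): EvacuationScope is a
// plain RAII guard around the collector's evacuation flag.
//
//   {
//     EvacuationScope scope(collector);
//     DCHECK(collector->evacuation());
//     // ... evacuate pages ...
//   }  // collector->evacuation() is false again here.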

#ifdef ENABLE_MINOR_MC

// Collector for young-generation only.
class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
 public:
  using MarkingState = MinorMarkingState;
  using NonAtomicMarkingState = MinorNonAtomicMarkingState;

  explicit MinorMarkCompactCollector(Heap* heap);
  ~MinorMarkCompactCollector() override;

  MarkingState* marking_state() { return &marking_state_; }

  NonAtomicMarkingState* non_atomic_marking_state() {
    return &non_atomic_marking_state_;
  }

  void SetUp() override;
  void TearDown() override;
  void CollectGarbage() override;

  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
                    FreeSpaceTreatmentMode free_space_mode);
  void CleanupSweepToIteratePages();

 private:
  using MarkingWorklist = Worklist<HeapObject, 64 /* segment size */>;
  class RootMarkingVisitor;

  static const int kNumMarkers = 8;
  static const int kMainMarker = 0;

  inline MarkingWorklist* worklist() { return worklist_; }

  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
    return main_marking_visitor_;
  }

  void MarkLiveObjects() override;
  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
  V8_INLINE void MarkRootObject(HeapObject obj);
  void DrainMarkingWorklist() override;
  void TraceFragmentation();
  void ClearNonLiveReferences() override;

  void EvacuatePrologue() override;
  void EvacuateEpilogue() override;
  void Evacuate() override;
  void EvacuatePagesInParallel() override;
  void UpdatePointersAfterEvacuation() override;

  std::unique_ptr<UpdatingItem> CreateToSpaceUpdatingItem(MemoryChunk* chunk,
                                                          Address start,
                                                          Address end) override;
  std::unique_ptr<UpdatingItem> CreateRememberedSetUpdatingItem(
      MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;

  void SweepArrayBufferExtensions();

  MarkingWorklist* worklist_;

  YoungGenerationMarkingVisitor* main_marking_visitor_;
  base::Semaphore page_parallel_job_semaphore_;
  std::vector<Page*> new_space_evacuation_pages_;
  std::vector<Page*> sweep_to_iterate_pages_;

  MarkingState marking_state_;
  NonAtomicMarkingState non_atomic_marking_state_;

  friend class YoungGenerationMarkingTask;
  friend class YoungGenerationMarkingJob;
  friend class YoungGenerationMarkingVisitor;
};

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_MARK_COMPACT_H_