1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include <unordered_map>
8 
9 #include "src/base/optional.h"
10 #include "src/base/utils/random-number-generator.h"
11 #include "src/codegen/compilation-cache.h"
12 #include "src/deoptimizer/deoptimizer.h"
13 #include "src/execution/execution.h"
14 #include "src/execution/frames-inl.h"
15 #include "src/execution/isolate-utils-inl.h"
16 #include "src/execution/isolate-utils.h"
17 #include "src/execution/vm-state-inl.h"
18 #include "src/handles/global-handles.h"
19 #include "src/heap/array-buffer-sweeper.h"
20 #include "src/heap/code-object-registry.h"
21 #include "src/heap/gc-tracer.h"
22 #include "src/heap/incremental-marking-inl.h"
23 #include "src/heap/index-generator.h"
24 #include "src/heap/invalidated-slots-inl.h"
25 #include "src/heap/item-parallel-job.h"
26 #include "src/heap/large-spaces.h"
27 #include "src/heap/local-allocator-inl.h"
28 #include "src/heap/mark-compact-inl.h"
29 #include "src/heap/marking-barrier.h"
30 #include "src/heap/marking-visitor-inl.h"
31 #include "src/heap/marking-visitor.h"
32 #include "src/heap/memory-measurement-inl.h"
33 #include "src/heap/memory-measurement.h"
34 #include "src/heap/object-stats.h"
35 #include "src/heap/objects-visiting-inl.h"
36 #include "src/heap/parallel-work-item.h"
37 #include "src/heap/read-only-heap.h"
38 #include "src/heap/read-only-spaces.h"
39 #include "src/heap/safepoint.h"
40 #include "src/heap/spaces-inl.h"
41 #include "src/heap/sweeper.h"
42 #include "src/heap/worklist.h"
43 #include "src/ic/stub-cache.h"
44 #include "src/init/v8.h"
45 #include "src/objects/embedder-data-array-inl.h"
46 #include "src/objects/foreign.h"
47 #include "src/objects/hash-table-inl.h"
48 #include "src/objects/js-objects-inl.h"
49 #include "src/objects/maybe-object.h"
50 #include "src/objects/slots-inl.h"
51 #include "src/objects/transitions-inl.h"
52 #include "src/tasks/cancelable-task.h"
53 #include "src/utils/utils-inl.h"
54 
55 namespace v8 {
56 namespace internal {
57 
58 const char* Marking::kWhiteBitPattern = "00";
59 const char* Marking::kBlackBitPattern = "11";
60 const char* Marking::kGreyBitPattern = "10";
61 const char* Marking::kImpossibleBitPattern = "01";
62 
63 // The following has to hold in order for {MarkingState::MarkBitFrom} to not
64 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
65 STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
66 
67 // =============================================================================
68 // Verifiers
69 // =============================================================================
70 
71 #ifdef VERIFY_HEAP
72 namespace {
73 
74 class MarkingVerifier : public ObjectVisitor, public RootVisitor {
75  public:
76   virtual void Run() = 0;
77 
78  protected:
79   explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
80 
81   virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
82       const MemoryChunk* chunk) = 0;
83 
84   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
85   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
86   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
87 
88   virtual bool IsMarked(HeapObject object) = 0;
89 
90   virtual bool IsBlackOrGrey(HeapObject object) = 0;
91 
92   void VisitPointers(HeapObject host, ObjectSlot start,
93                      ObjectSlot end) override {
94     VerifyPointers(start, end);
95   }
96 
97   void VisitPointers(HeapObject host, MaybeObjectSlot start,
98                      MaybeObjectSlot end) override {
99     VerifyPointers(start, end);
100   }
101 
102   void VisitRootPointers(Root root, const char* description,
103                          FullObjectSlot start, FullObjectSlot end) override {
104     VerifyRootPointers(start, end);
105   }
106 
107   void VerifyRoots();
108   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
109   void VerifyMarking(NewSpace* new_space);
110   void VerifyMarking(PagedSpace* paged_space);
111   void VerifyMarking(LargeObjectSpace* lo_space);
112 
113   Heap* heap_;
114 };
115 
116 void MarkingVerifier::VerifyRoots() {
117   heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
118 }
119 
120 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
121                                           Address end) {
122   Address next_object_must_be_here_or_later = start;
123 
124   for (auto object_and_size :
125        LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
126     HeapObject object = object_and_size.first;
127     size_t size = object_and_size.second;
128     Address current = object.address();
129     if (current < start) continue;
130     if (current >= end) break;
131     CHECK(IsMarked(object));
132     CHECK(current >= next_object_must_be_here_or_later);
133     object.Iterate(this);
134     next_object_must_be_here_or_later = current + size;
135     // The object is either part of a black area of black allocation or a
136     // regular black object
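    // (A regular black object has only the two mark bits at its start set, so
    // the bitmap from |current + kTaggedSize * 2| onwards must be clear.)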
137     CHECK(bitmap(page)->AllBitsSetInRange(
138               page->AddressToMarkbitIndex(current),
139               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
140           bitmap(page)->AllBitsClearInRange(
141               page->AddressToMarkbitIndex(current + kTaggedSize * 2),
142               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
143     current = next_object_must_be_here_or_later;
144   }
145 }
146 
147 void MarkingVerifier::VerifyMarking(NewSpace* space) {
148   Address end = space->top();
149   // The bottom position is at the start of its page, which allows us to use
150   // page->area_start() as the start of the range on all pages.
151   CHECK_EQ(space->first_allocatable_address(),
152            space->first_page()->area_start());
153 
154   PageRange range(space->first_allocatable_address(), end);
155   for (auto it = range.begin(); it != range.end();) {
156     Page* page = *(it++);
157     Address limit = it != range.end() ? page->area_end() : end;
158     CHECK(limit == end || !page->Contains(end));
159     VerifyMarkingOnPage(page, page->area_start(), limit);
160   }
161 }
162 
163 void MarkingVerifier::VerifyMarking(PagedSpace* space) {
164   for (Page* p : *space) {
165     VerifyMarkingOnPage(p, p->area_start(), p->area_end());
166   }
167 }
168 
169 void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
170   LargeObjectSpaceObjectIterator it(lo_space);
171   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
172     if (IsBlackOrGrey(obj)) {
173       obj.Iterate(this);
174     }
175   }
176 }
177 
178 class FullMarkingVerifier : public MarkingVerifier {
179  public:
180   explicit FullMarkingVerifier(Heap* heap)
181       : MarkingVerifier(heap),
182         marking_state_(
183             heap->mark_compact_collector()->non_atomic_marking_state()) {}
184 
185   void Run() override {
186     VerifyRoots();
187     VerifyMarking(heap_->new_space());
188     VerifyMarking(heap_->new_lo_space());
189     VerifyMarking(heap_->old_space());
190     VerifyMarking(heap_->code_space());
191     VerifyMarking(heap_->map_space());
192     VerifyMarking(heap_->lo_space());
193     VerifyMarking(heap_->code_lo_space());
194   }
195 
196  protected:
197   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
198       const MemoryChunk* chunk) override {
199     return marking_state_->bitmap(chunk);
200   }
201 
202   bool IsMarked(HeapObject object) override {
203     return marking_state_->IsBlack(object);
204   }
205 
206   bool IsBlackOrGrey(HeapObject object) override {
207     return marking_state_->IsBlackOrGrey(object);
208   }
209 
210   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
211     VerifyPointersImpl(start, end);
212   }
213 
214   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
215     VerifyPointersImpl(start, end);
216   }
217 
218   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
219     VerifyPointersImpl(start, end);
220   }
221 
222   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
223     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
224     VerifyHeapObjectImpl(target);
225   }
226 
227   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
228     DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
229     if (!host.IsWeakObject(rinfo->target_object())) {
230       HeapObject object = rinfo->target_object();
231       VerifyHeapObjectImpl(object);
232     }
233   }
234 
235  private:
236   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
237     CHECK(marking_state_->IsBlackOrGrey(heap_object));
238   }
239 
240   template <typename TSlot>
241   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
242     for (TSlot slot = start; slot < end; ++slot) {
243       typename TSlot::TObject object = *slot;
244       HeapObject heap_object;
245       if (object.GetHeapObjectIfStrong(&heap_object)) {
246         VerifyHeapObjectImpl(heap_object);
247       }
248     }
249   }
250 
251   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
252 };
253 
254 class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
255  public:
256   virtual void Run() = 0;
257 
258   void VisitPointers(HeapObject host, ObjectSlot start,
259                      ObjectSlot end) override {
260     VerifyPointers(start, end);
261   }
262 
263   void VisitPointers(HeapObject host, MaybeObjectSlot start,
264                      MaybeObjectSlot end) override {
265     VerifyPointers(start, end);
266   }
267 
268   void VisitRootPointers(Root root, const char* description,
269                          FullObjectSlot start, FullObjectSlot end) override {
270     VerifyRootPointers(start, end);
271   }
272 
273  protected:
274   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
275 
276   inline Heap* heap() { return heap_; }
277 
278   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
279   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
280   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
281 
282   void VerifyRoots();
283   void VerifyEvacuationOnPage(Address start, Address end);
284   void VerifyEvacuation(NewSpace* new_space);
285   void VerifyEvacuation(PagedSpace* paged_space);
286 
287   Heap* heap_;
288 };
289 
290 void EvacuationVerifier::VerifyRoots() {
291   heap_->IterateRoots(this, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
292 }
293 
294 void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
295   Address current = start;
296   while (current < end) {
297     HeapObject object = HeapObject::FromAddress(current);
298     if (!object.IsFreeSpaceOrFiller()) object.Iterate(this);
299     current += object.Size();
300   }
301 }
302 
303 void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
304   PageRange range(space->first_allocatable_address(), space->top());
305   for (auto it = range.begin(); it != range.end();) {
306     Page* page = *(it++);
307     Address current = page->area_start();
308     Address limit = it != range.end() ? page->area_end() : space->top();
309     CHECK(limit == space->top() || !page->Contains(space->top()));
310     VerifyEvacuationOnPage(current, limit);
311   }
312 }
313 
314 void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
315   for (Page* p : *space) {
316     if (p->IsEvacuationCandidate()) continue;
317     if (p->Contains(space->top())) {
318       CodePageMemoryModificationScope memory_modification_scope(p);
319       heap_->CreateFillerObjectAt(
320           space->top(), static_cast<int>(space->limit() - space->top()),
321           ClearRecordedSlots::kNo);
322     }
323     VerifyEvacuationOnPage(p->area_start(), p->area_end());
324   }
325 }
326 
327 class FullEvacuationVerifier : public EvacuationVerifier {
328  public:
329   explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
330 
331   void Run() override {
332     VerifyRoots();
333     VerifyEvacuation(heap_->new_space());
334     VerifyEvacuation(heap_->old_space());
335     VerifyEvacuation(heap_->code_space());
336     VerifyEvacuation(heap_->map_space());
337   }
338 
339  protected:
340   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
341     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
342                   Heap::InToPage(heap_object));
343     CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
344   }
345 
346   template <typename TSlot>
347   void VerifyPointersImpl(TSlot start, TSlot end) {
348     for (TSlot current = start; current < end; ++current) {
349       typename TSlot::TObject object = *current;
350       HeapObject heap_object;
351       if (object.GetHeapObjectIfStrong(&heap_object)) {
352         VerifyHeapObjectImpl(heap_object);
353       }
354     }
355   }
356 
357   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
358     VerifyPointersImpl(start, end);
359   }
360   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
361     VerifyPointersImpl(start, end);
362   }
363   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
364     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
365     VerifyHeapObjectImpl(target);
366   }
367   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
368     VerifyHeapObjectImpl(rinfo->target_object());
369   }
370   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
371     VerifyPointersImpl(start, end);
372   }
373 };
374 
375 }  // namespace
376 #endif  // VERIFY_HEAP
377 
378 // =============================================================================
379 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
380 // =============================================================================
381 
382 namespace {
383 
384 int NumberOfAvailableCores() {
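  // NumberOfWorkerThreads() + 1 counts the main thread in addition to the
  // platform's worker threads.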
385   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
386   // This number of cores should be greater than zero and never change.
387   DCHECK_GE(num_cores, 1);
388   DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
389   return num_cores;
390 }
391 
392 }  // namespace
393 
394 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
395   int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
396   if (!heap_->CanPromoteYoungAndExpandOldGeneration(
397           static_cast<size_t>(tasks * Page::kPageSize))) {
398     // Optimize for memory usage near the heap limit.
399     tasks = 1;
400   }
401   return tasks;
402 }
403 
404 MarkCompactCollector::MarkCompactCollector(Heap* heap)
405     : MarkCompactCollectorBase(heap),
406       page_parallel_job_semaphore_(0),
407 #ifdef DEBUG
408       state_(IDLE),
409 #endif
410       was_marked_incrementally_(false),
411       evacuation_(false),
412       compacting_(false),
413       black_allocation_(false),
414       have_code_to_deoptimize_(false),
415       sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
416   old_to_new_slots_ = -1;
417 }
418 
419 MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
420 
421 void MarkCompactCollector::SetUp() {
422   DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
423   DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
424   DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
425   DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
426 }
427 
428 void MarkCompactCollector::TearDown() {
429   AbortCompaction();
430   AbortWeakObjects();
431   if (heap()->incremental_marking()->IsMarking()) {
432     local_marking_worklists()->Publish();
433     heap()->marking_barrier()->Publish();
434     // Marking barriers of LocalHeaps will be published in their destructors.
435     marking_worklists()->Clear();
436   }
437 }
438 
439 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
440   DCHECK(!p->NeverEvacuate());
441 
442   if (FLAG_trace_evacuation_candidates) {
443     PrintIsolate(
444         isolate(),
445         "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
446         p->area_size() - p->allocated_bytes(), p->FreeListsLength());
447   }
448 
449   p->MarkEvacuationCandidate();
450   evacuation_candidates_.push_back(p);
451 }
452 
453 static void TraceFragmentation(PagedSpace* space) {
454   int number_of_pages = space->CountTotalPages();
455   intptr_t reserved = (number_of_pages * space->AreaSize());
456   intptr_t free = reserved - space->SizeOfObjects();
457   PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
458          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
459 }
460 
461 bool MarkCompactCollector::StartCompaction() {
462   if (!compacting_) {
463     DCHECK(evacuation_candidates_.empty());
464 
465     if (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())
466       return false;
467 
468     CollectEvacuationCandidates(heap()->old_space());
469 
470     if (FLAG_compact_code_space) {
471       CollectEvacuationCandidates(heap()->code_space());
472     } else if (FLAG_trace_fragmentation) {
473       TraceFragmentation(heap()->code_space());
474     }
475 
476     if (FLAG_trace_fragmentation) {
477       TraceFragmentation(heap()->map_space());
478     }
479 
480     compacting_ = !evacuation_candidates_.empty();
481   }
482 
483   return compacting_;
484 }
485 
486 void MarkCompactCollector::StartMarking() {
487   std::vector<Address> contexts =
488       heap()->memory_measurement()->StartProcessing();
489   if (FLAG_stress_per_context_marking_worklist) {
490     contexts.clear();
491     HandleScope handle_scope(heap()->isolate());
492     for (auto context : heap()->FindAllNativeContexts()) {
493       contexts.push_back(context->ptr());
494     }
495   }
496   marking_worklists()->CreateContextWorklists(contexts);
497   local_marking_worklists_ =
498       std::make_unique<MarkingWorklists::Local>(marking_worklists());
499   marking_visitor_ = std::make_unique<MarkingVisitor>(
500       marking_state(), local_marking_worklists(), weak_objects(), heap_,
501       epoch(), Heap::GetBytecodeFlushMode(),
502       heap_->local_embedder_heap_tracer()->InUse(),
503       heap_->is_current_gc_forced());
504 // Marking bits are cleared by the sweeper.
505 #ifdef VERIFY_HEAP
506   if (FLAG_verify_heap) {
507     VerifyMarkbitsAreClean();
508   }
509 #endif
510 }
511 
512 void MarkCompactCollector::CollectGarbage() {
513   // Make sure that Prepare() has been called. The individual steps below will
514   // update the state as they proceed.
515   DCHECK(state_ == PREPARE_GC);
516 
517 #ifdef ENABLE_MINOR_MC
518   heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
519 #endif  // ENABLE_MINOR_MC
520 
521   MarkLiveObjects();
522   ClearNonLiveReferences();
523   VerifyMarking();
524   heap()->memory_measurement()->FinishProcessing(native_context_stats_);
525   RecordObjectStats();
526 
527   StartSweepSpaces();
528   Evacuate();
529   Finish();
530 }
531 
532 #ifdef VERIFY_HEAP
533 void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
534   ReadOnlyHeapObjectIterator iterator(space);
535   for (HeapObject object = iterator.Next(); !object.is_null();
536        object = iterator.Next()) {
537     CHECK(non_atomic_marking_state()->IsBlack(object));
538   }
539 }
540 
541 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
542   for (Page* p : *space) {
543     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
544     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
545   }
546 }
547 
548 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
549   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
550     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
551     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
552   }
553 }
554 
555 void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
556   LargeObjectSpaceObjectIterator it(space);
557   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
558     CHECK(non_atomic_marking_state()->IsWhite(obj));
559     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
560                     MemoryChunk::FromHeapObject(obj)));
561   }
562 }
563 
564 void MarkCompactCollector::VerifyMarkbitsAreClean() {
565   VerifyMarkbitsAreClean(heap_->old_space());
566   VerifyMarkbitsAreClean(heap_->code_space());
567   VerifyMarkbitsAreClean(heap_->map_space());
568   VerifyMarkbitsAreClean(heap_->new_space());
569   // Read-only space should always be black since we never collect any objects
570   // in it or linked from it.
571   VerifyMarkbitsAreDirty(heap_->read_only_space());
572   VerifyMarkbitsAreClean(heap_->lo_space());
573   VerifyMarkbitsAreClean(heap_->code_lo_space());
574   VerifyMarkbitsAreClean(heap_->new_lo_space());
575 }
576 
577 #endif  // VERIFY_HEAP
578 
579 void MarkCompactCollector::EnsureSweepingCompleted() {
580   if (!sweeper()->sweeping_in_progress()) return;
581 
582   sweeper()->EnsureCompleted();
583   heap()->old_space()->RefillFreeList();
584   heap()->code_space()->RefillFreeList();
585   heap()->map_space()->RefillFreeList();
586   heap()->map_space()->SortFreeList();
587 
588   heap()->tracer()->NotifySweepingCompleted();
589 
590 #ifdef VERIFY_HEAP
591   if (FLAG_verify_heap && !evacuation()) {
592     FullEvacuationVerifier verifier(heap());
593     verifier.Run();
594   }
595 #endif
596 }
597 
598 void MarkCompactCollector::DrainSweepingWorklists() {
599   if (!sweeper()->sweeping_in_progress()) return;
600   sweeper()->DrainSweepingWorklists();
601 }
602 
603 void MarkCompactCollector::DrainSweepingWorklistForSpace(
604     AllocationSpace space) {
605   if (!sweeper()->sweeping_in_progress()) return;
606   sweeper()->DrainSweepingWorklistForSpace(space);
607 }
608 
609 void MarkCompactCollector::ComputeEvacuationHeuristics(
610     size_t area_size, int* target_fragmentation_percent,
611     size_t* max_evacuated_bytes) {
612   // For the memory-reducing and optimize-for-memory modes we directly define
613   // both constants.
614   const int kTargetFragmentationPercentForReduceMemory = 20;
615   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
616   const int kTargetFragmentationPercentForOptimizeMemory = 20;
617   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
618 
619   // For regular mode (which is latency critical) we define less aggressive
620   // defaults to start and switch to a trace-based (using compaction speed)
621   // approach as soon as we have enough samples.
622   const int kTargetFragmentationPercent = 70;
623   const size_t kMaxEvacuatedBytes = 4 * MB;
624   // Time to take for a single area (=payload of page). Used as soon as there
625   // exist enough compaction speed samples.
626   const float kTargetMsPerArea = .5;
627 
628   if (heap()->ShouldReduceMemory()) {
629     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
630     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
631   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
632     *target_fragmentation_percent =
633         kTargetFragmentationPercentForOptimizeMemory;
634     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
635   } else {
636     const double estimated_compaction_speed =
637         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
638     if (estimated_compaction_speed != 0) {
639       // Estimate the target fragmentation based on traced compaction speed
640       // and a goal for a single page.
641       const double estimated_ms_per_area =
642           1 + area_size / estimated_compaction_speed;
643       *target_fragmentation_percent = static_cast<int>(
644           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
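      // Illustrative example (hypothetical numbers): with area_size = 500 KB
      // and a compaction speed of 500 KB/ms, estimated_ms_per_area is
      // 1 + 500 / 500 = 2, so the target fragmentation becomes
      // 100 - 100 * 0.5 / 2 = 75%.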
645       if (*target_fragmentation_percent <
646           kTargetFragmentationPercentForReduceMemory) {
647         *target_fragmentation_percent =
648             kTargetFragmentationPercentForReduceMemory;
649       }
650     } else {
651       *target_fragmentation_percent = kTargetFragmentationPercent;
652     }
653     *max_evacuated_bytes = kMaxEvacuatedBytes;
654   }
655 }
656 
657 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
658   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
659 
660   int number_of_pages = space->CountTotalPages();
661   size_t area_size = space->AreaSize();
662 
663   const bool in_standard_path =
664       !(FLAG_manual_evacuation_candidates_selection ||
665         FLAG_stress_compaction_random || FLAG_stress_compaction ||
666         FLAG_always_compact);
667   // Those variables will only be initialized if |in_standard_path|, and are not
668   // used otherwise.
669   size_t max_evacuated_bytes;
670   int target_fragmentation_percent;
671   size_t free_bytes_threshold;
672   if (in_standard_path) {
673     // We use two conditions to decide whether a page qualifies as an evacuation
674     // candidate, or not:
675     // * Target fragmentation: How fragmented is a page, i.e., the ratio
676     //   between live bytes and the capacity of this page (= area).
677     // * Evacuation quota: A global quota determining how many bytes should be
678     //   compacted.
679     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
680                                 &max_evacuated_bytes);
681     free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
682   }
683 
684   // Pairs of (live_bytes_in_page, page).
685   using LiveBytesPagePair = std::pair<size_t, Page*>;
686   std::vector<LiveBytesPagePair> pages;
687   pages.reserve(number_of_pages);
688 
689   DCHECK(!sweeping_in_progress());
690   Page* owner_of_linear_allocation_area =
691       space->top() == space->limit()
692           ? nullptr
693           : Page::FromAllocationAreaAddress(space->top());
694   for (Page* p : *space) {
695     if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
696         !p->CanAllocate())
697       continue;
698 
699     if (p->IsPinned()) {
700       DCHECK(
701           !p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING));
702       continue;
703     }
704 
705     // Invariant: Evacuation candidates are only created when marking is
706     // started. This means that sweeping has finished. Furthermore, at the end
707     // of a GC all evacuation candidates are cleared and their slot buffers are
708     // released.
709     CHECK(!p->IsEvacuationCandidate());
710     CHECK_NULL(p->slot_set<OLD_TO_OLD>());
711     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
712     CHECK(p->SweepingDone());
713     DCHECK(p->area_size() == area_size);
714     if (in_standard_path) {
715       // Only the pages with more than |free_bytes_threshold| free bytes are
716       // considered for evacuation.
717       if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
718         pages.push_back(std::make_pair(p->allocated_bytes(), p));
719       }
720     } else {
721       pages.push_back(std::make_pair(p->allocated_bytes(), p));
722     }
723 
724     // Unpin pages for the next GC
725     if (p->IsFlagSet(MemoryChunk::PINNED)) {
726       p->ClearFlag(MemoryChunk::PINNED);
727     }
728   }
729 
730   int candidate_count = 0;
731   size_t total_live_bytes = 0;
732 
733   const bool reduce_memory = heap()->ShouldReduceMemory();
734   if (FLAG_manual_evacuation_candidates_selection) {
735     for (size_t i = 0; i < pages.size(); i++) {
736       Page* p = pages[i].second;
737       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
738         candidate_count++;
739         total_live_bytes += pages[i].first;
740         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
741         AddEvacuationCandidate(p);
742       }
743     }
744   } else if (FLAG_stress_compaction_random) {
745     double fraction = isolate()->fuzzer_rng()->NextDouble();
746     size_t pages_to_mark_count =
747         static_cast<size_t>(fraction * (pages.size() + 1));
748     for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
749              pages.size(), pages_to_mark_count)) {
750       candidate_count++;
751       total_live_bytes += pages[i].first;
752       AddEvacuationCandidate(pages[i].second);
753     }
754   } else if (FLAG_stress_compaction) {
755     for (size_t i = 0; i < pages.size(); i++) {
756       Page* p = pages[i].second;
757       if (i % 2 == 0) {
758         candidate_count++;
759         total_live_bytes += pages[i].first;
760         AddEvacuationCandidate(p);
761       }
762     }
763   } else {
764     // The following approach determines the pages that should be evacuated.
765     //
766     // Sort pages from the most free to the least free, then select
767     // the first n pages for evacuation such that:
768     // - the total size of evacuated objects does not exceed the specified
769     // limit.
770     // - fragmentation of (n+1)-th page does not exceed the specified limit.
771     std::sort(pages.begin(), pages.end(),
772               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
773                 return a.first < b.first;
774               });
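    // Sorting by ascending live bytes is equivalent to sorting by descending
    // free bytes, since all candidate pages share the same |area_size|.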
775     for (size_t i = 0; i < pages.size(); i++) {
776       size_t live_bytes = pages[i].first;
777       DCHECK_GE(area_size, live_bytes);
778       if (FLAG_always_compact ||
779           ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
780         candidate_count++;
781         total_live_bytes += live_bytes;
782       }
783       if (FLAG_trace_fragmentation_verbose) {
784         PrintIsolate(isolate(),
785                      "compaction-selection-page: space=%s free_bytes_page=%zu "
786                      "fragmentation_limit_kb=%zu "
787                      "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
788                      "compaction_limit_kb=%zu\n",
789                      space->name(), (area_size - live_bytes) / KB,
790                      free_bytes_threshold / KB, target_fragmentation_percent,
791                      total_live_bytes / KB, max_evacuated_bytes / KB);
792       }
793     }
794     // How many pages we will allocate for the evacuated objects
795     // in the worst case: ceil(total_live_bytes / area_size)
796     int estimated_new_pages =
797         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
798     DCHECK_LE(estimated_new_pages, candidate_count);
799     int estimated_released_pages = candidate_count - estimated_new_pages;
800     // Avoid (compact -> expand) cycles.
801     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
802       candidate_count = 0;
803     }
804     for (int i = 0; i < candidate_count; i++) {
805       AddEvacuationCandidate(pages[i].second);
806     }
807   }
808 
809   if (FLAG_trace_fragmentation) {
810     PrintIsolate(isolate(),
811                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
812                  "total_live_bytes=%zu\n",
813                  space->name(), reduce_memory, candidate_count,
814                  total_live_bytes / KB);
815   }
816 }
817 
818 void MarkCompactCollector::AbortCompaction() {
819   if (compacting_) {
820     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
821     for (Page* p : evacuation_candidates_) {
822       p->ClearEvacuationCandidate();
823     }
824     compacting_ = false;
825     evacuation_candidates_.clear();
826   }
827   DCHECK(evacuation_candidates_.empty());
828 }
829 
830 void MarkCompactCollector::Prepare() {
831   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
832 
833 #ifdef DEBUG
834   DCHECK(state_ == IDLE);
835   state_ = PREPARE_GC;
836 #endif
837 
838   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
839 
840   // Instead of waiting we could also abort the sweeper threads here.
841   EnsureSweepingCompleted();
842 
843   {
844     TRACE_GC(heap()->tracer(),
845              GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
846     heap_->array_buffer_sweeper()->EnsureFinished();
847   }
848 
849   if (heap()->incremental_marking()->IsSweeping()) {
850     heap()->incremental_marking()->Stop();
851   }
852 
853   if (!was_marked_incrementally_) {
854     {
855       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
856       heap_->local_embedder_heap_tracer()->TracePrologue(
857           heap_->flags_for_embedder_tracer());
858     }
859     if (!FLAG_never_compact) {
860       StartCompaction();
861     }
862     StartMarking();
863   }
864 
865   PagedSpaceIterator spaces(heap());
866   for (PagedSpace* space = spaces.Next(); space != nullptr;
867        space = spaces.Next()) {
868     space->PrepareForMarkCompact();
869   }
870 
871   if (FLAG_local_heaps) {
872     // Fill and reset all background thread LABs
873     heap_->safepoint()->IterateLocalHeaps(
874         [](LocalHeap* local_heap) { local_heap->FreeLinearAllocationArea(); });
875   }
876 
877   // All objects are guaranteed to be initialized in atomic pause
878   heap()->new_lo_space()->ResetPendingObject();
879   DCHECK_EQ(heap()->new_space()->top(),
880             heap()->new_space()->original_top_acquire());
881 }
882 
883 void MarkCompactCollector::FinishConcurrentMarking() {
884   // FinishConcurrentMarking is called for both concurrent and parallel
885   // marking. It is safe to call this function when tasks are already finished.
886   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
887     heap()->concurrent_marking()->Join();
888     heap()->concurrent_marking()->FlushMemoryChunkData(
889         non_atomic_marking_state());
890     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
891   }
892 }
893 
894 void MarkCompactCollector::VerifyMarking() {
895   CHECK(local_marking_worklists()->IsEmpty());
896   DCHECK(heap_->incremental_marking()->IsStopped());
897 #ifdef VERIFY_HEAP
898   if (FLAG_verify_heap) {
899     FullMarkingVerifier verifier(heap());
900     verifier.Run();
901   }
902 #endif
903 #ifdef VERIFY_HEAP
904   if (FLAG_verify_heap) {
905     heap()->old_space()->VerifyLiveBytes();
906     heap()->map_space()->VerifyLiveBytes();
907     heap()->code_space()->VerifyLiveBytes();
908   }
909 #endif
910 }
911 
912 void MarkCompactCollector::Finish() {
913   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
914 
915   SweepArrayBufferExtensions();
916 
917 #ifdef DEBUG
918   heap()->VerifyCountersBeforeConcurrentSweeping();
919 #endif
920 
921   marking_visitor_.reset();
922   local_marking_worklists_.reset();
923   marking_worklists_.ReleaseContextWorklists();
924   native_context_stats_.Clear();
925 
926   CHECK(weak_objects_.current_ephemerons.IsEmpty());
927   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
928   weak_objects_.next_ephemerons.Clear();
929 
930   sweeper()->StartSweeperTasks();
931   sweeper()->StartIterabilityTasks();
932 
933   // Clear the marking state of live large objects.
934   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
935   heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
936 
937 #ifdef DEBUG
938   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
939   state_ = IDLE;
940 #endif
941   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
942 
943   // The stub caches are not traversed during GC; clear them to force
944   // their lazy re-initialization. This must be done after the
945   // GC, because it relies on the new address of certain old space
946   // objects (empty string, illegal builtin).
947   isolate()->load_stub_cache()->Clear();
948   isolate()->store_stub_cache()->Clear();
949 
950   if (have_code_to_deoptimize_) {
951     // Some code objects were marked for deoptimization during the GC.
952     Deoptimizer::DeoptimizeMarkedCode(isolate());
953     have_code_to_deoptimize_ = false;
954   }
955 }
956 
957 void MarkCompactCollector::SweepArrayBufferExtensions() {
958   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
959   heap_->array_buffer_sweeper()->RequestSweepFull();
960 }
961 
962 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
963  public:
964   explicit RootMarkingVisitor(MarkCompactCollector* collector)
965       : collector_(collector) {}
966 
967   void VisitRootPointer(Root root, const char* description,
968                         FullObjectSlot p) final {
969     MarkObjectByPointer(root, p);
970   }
971 
972   void VisitRootPointers(Root root, const char* description,
973                          FullObjectSlot start, FullObjectSlot end) final {
974     for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
975   }
976 
977  private:
978   V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
979     if (!(*p).IsHeapObject()) return;
980 
981     collector_->MarkRootObject(root, HeapObject::cast(*p));
982   }
983 
984   MarkCompactCollector* const collector_;
985 };
986 
987 // This visitor is used to visit the body of special objects held alive by
988 // other roots.
989 //
990 // It is currently used for
991 // - Code held alive by the top optimized frame. This code cannot be deoptimized
992 // and thus has to be kept alive in an isolated way, i.e., it should not keep
993 // alive other code objects reachable through the weak list, but it should
994 // keep alive its embedded pointers (which would otherwise be dropped).
995 // - Prefix of the string table.
996 class MarkCompactCollector::CustomRootBodyMarkingVisitor final
997     : public ObjectVisitor {
998  public:
999   explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
1000       : collector_(collector) {}
1001 
1002   void VisitPointer(HeapObject host, ObjectSlot p) final {
1003     MarkObject(host, *p);
1004   }
1005 
1006   void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
1007     for (ObjectSlot p = start; p < end; ++p) {
1008       DCHECK(!HasWeakHeapObjectTag(*p));
1009       MarkObject(host, *p);
1010     }
1011   }
1012 
1013   void VisitPointers(HeapObject host, MaybeObjectSlot start,
1014                      MaybeObjectSlot end) final {
1015     // At the moment, custom roots cannot contain weak pointers.
1016     UNREACHABLE();
1017   }
1018 
1019   // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
1020   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1021     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1022     MarkObject(host, target);
1023   }
1024   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1025     MarkObject(host, rinfo->target_object());
1026   }
1027 
1028  private:
1029   V8_INLINE void MarkObject(HeapObject host, Object object) {
1030     if (!object.IsHeapObject()) return;
1031     collector_->MarkObject(host, HeapObject::cast(object));
1032   }
1033 
1034   MarkCompactCollector* const collector_;
1035 };
1036 
1037 class InternalizedStringTableCleaner : public RootVisitor {
1038  public:
1039   explicit InternalizedStringTableCleaner(Heap* heap)
1040       : heap_(heap), pointers_removed_(0) {}
1041 
1042   void VisitRootPointers(Root root, const char* description,
1043                          FullObjectSlot start, FullObjectSlot end) override {
1044     UNREACHABLE();
1045   }
1046 
1047   void VisitRootPointers(Root root, const char* description,
1048                          OffHeapObjectSlot start,
1049                          OffHeapObjectSlot end) override {
1050     DCHECK_EQ(root, Root::kStringTable);
1051     // Visit all HeapObject pointers in [start, end).
1052     MarkCompactCollector::NonAtomicMarkingState* marking_state =
1053         heap_->mark_compact_collector()->non_atomic_marking_state();
1054     Isolate* isolate = heap_->isolate();
1055     for (OffHeapObjectSlot p = start; p < end; ++p) {
1056       Object o = p.load(isolate);
1057       if (o.IsHeapObject()) {
1058         HeapObject heap_object = HeapObject::cast(o);
1059         DCHECK(!Heap::InYoungGeneration(heap_object));
1060         if (marking_state->IsWhite(heap_object)) {
1061           pointers_removed_++;
1062           // Set the entry to the_hole_value (as deleted).
1063           p.store(StringTable::deleted_element());
1064         }
1065       }
1066     }
1067   }
1068 
1069   int PointersRemoved() { return pointers_removed_; }
1070 
1071  private:
1072   Heap* heap_;
1073   int pointers_removed_;
1074 };
1075 
1076 class ExternalStringTableCleaner : public RootVisitor {
1077  public:
1078   explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1079 
1080   void VisitRootPointers(Root root, const char* description,
1081                          FullObjectSlot start, FullObjectSlot end) override {
1082     // Visit all HeapObject pointers in [start, end).
1083     MarkCompactCollector::NonAtomicMarkingState* marking_state =
1084         heap_->mark_compact_collector()->non_atomic_marking_state();
1085     Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
1086     for (FullObjectSlot p = start; p < end; ++p) {
1087       Object o = *p;
1088       if (o.IsHeapObject()) {
1089         HeapObject heap_object = HeapObject::cast(o);
1090         if (marking_state->IsWhite(heap_object)) {
1091           if (o.IsExternalString()) {
1092             heap_->FinalizeExternalString(String::cast(o));
1093           } else {
1094             // The original external string may have been internalized.
1095             DCHECK(o.IsThinString());
1096           }
1097           // Set the entry to the_hole_value (as deleted).
1098           p.store(the_hole);
1099         }
1100       }
1101     }
1102   }
1103 
1104  private:
1105   Heap* heap_;
1106 };
1107 
1108 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1109 // are retained.
1110 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1111  public:
1112   explicit MarkCompactWeakObjectRetainer(
1113       MarkCompactCollector::NonAtomicMarkingState* marking_state)
1114       : marking_state_(marking_state) {}
1115 
1116   Object RetainAs(Object object) override {
1117     HeapObject heap_object = HeapObject::cast(object);
1118     DCHECK(!marking_state_->IsGrey(heap_object));
1119     if (marking_state_->IsBlack(heap_object)) {
1120       return object;
1121     } else if (object.IsAllocationSite() &&
1122                !(AllocationSite::cast(object).IsZombie())) {
1123       // "dead" AllocationSites need to live long enough for a traversal of new
1124       // space. These sites get a one-time reprieve.
1125 
1126       Object nested = object;
1127       while (nested.IsAllocationSite()) {
1128         AllocationSite current_site = AllocationSite::cast(nested);
1129         // MarkZombie will override the nested_site; read it first before
1130         // marking.
1131         nested = current_site.nested_site();
1132         current_site.MarkZombie();
1133         marking_state_->WhiteToBlack(current_site);
1134       }
1135 
1136       return object;
1137     } else {
1138       return Object();
1139     }
1140   }
1141 
1142  private:
1143   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1144 };
1145 
1146 class RecordMigratedSlotVisitor : public ObjectVisitor {
1147  public:
1148   explicit RecordMigratedSlotVisitor(
1149       MarkCompactCollector* collector,
1150       EphemeronRememberedSet* ephemeron_remembered_set)
1151       : collector_(collector),
1152         ephemeron_remembered_set_(ephemeron_remembered_set) {}
1153 
1154   inline void VisitPointer(HeapObject host, ObjectSlot p) final {
1155     DCHECK(!HasWeakHeapObjectTag(*p));
1156     RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
1157   }
1158 
1159   inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1160     RecordMigratedSlot(host, *p, p.address());
1161   }
1162 
1163   inline void VisitPointers(HeapObject host, ObjectSlot start,
1164                             ObjectSlot end) final {
1165     while (start < end) {
1166       VisitPointer(host, start);
1167       ++start;
1168     }
1169   }
1170 
1171   inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
1172                             MaybeObjectSlot end) final {
1173     while (start < end) {
1174       VisitPointer(host, start);
1175       ++start;
1176     }
1177   }
1178 
1179   inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
1180                              ObjectSlot value) override {
1181     DCHECK(host.IsEphemeronHashTable());
1182     DCHECK(!Heap::InYoungGeneration(host));
1183 
1184     VisitPointer(host, value);
1185 
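    // A key living in the young generation is tracked via the ephemeron
    // remembered set; any other key is recorded like a regular slot.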
1186     if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
1187       auto table = EphemeronHashTable::unchecked_cast(host);
1188       auto insert_result =
1189           ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
1190       insert_result.first->second.insert(index);
1191     } else {
1192       VisitPointer(host, key);
1193     }
1194   }
1195 
1196   inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1197     DCHECK_EQ(host, rinfo->host());
1198     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1199     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1200     // The target is always in old space, so we don't have to record the slot in
1201     // the old-to-new remembered set.
1202     DCHECK(!Heap::InYoungGeneration(target));
1203     collector_->RecordRelocSlot(host, rinfo, target);
1204   }
1205 
1206   inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1207     DCHECK_EQ(host, rinfo->host());
1208     DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
1209     HeapObject object = HeapObject::cast(rinfo->target_object());
1210     GenerationalBarrierForCode(host, rinfo, object);
1211     collector_->RecordRelocSlot(host, rinfo, object);
1212   }
1213 
1214   // Entries that are skipped for recording.
1215   inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1216   inline void VisitExternalReference(Foreign host, Address* p) final {}
1217   inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1218   inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1219 
1220   virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
1221 
1222  protected:
1223   inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
1224                                          Address slot) {
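    // Record the slot in the remembered set matching the referenced value:
    // old-to-new if the value is in the young generation, old-to-old if it
    // lives on an evacuation candidate.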
1225     if (value->IsStrongOrWeak()) {
1226       BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
1227       if (p->InYoungGeneration()) {
1228         DCHECK_IMPLIES(
1229             p->IsToPage(),
1230             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
1231 
1232         MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
1233         DCHECK(chunk->SweepingDone());
1234         DCHECK_NULL(chunk->sweeping_slot_set<AccessMode::NON_ATOMIC>());
1235         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
1236       } else if (p->IsEvacuationCandidate()) {
1237         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1238             MemoryChunk::FromHeapObject(host), slot);
1239       }
1240     }
1241   }
1242 
1243   MarkCompactCollector* collector_;
1244   EphemeronRememberedSet* ephemeron_remembered_set_;
1245 };
1246 
1247 class MigrationObserver {
1248  public:
1249   explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1250 
1251   virtual ~MigrationObserver() = default;
1252   virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1253                     int size) = 0;
1254 
1255  protected:
1256   Heap* heap_;
1257 };
1258 
1259 class ProfilingMigrationObserver final : public MigrationObserver {
1260  public:
1261   explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1262 
1263   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1264                    int size) final {
1265     if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
1266       PROFILE(heap_->isolate(),
1267               CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1268     }
1269     heap_->OnMoveEvent(dst, src, size);
1270   }
1271 };
1272 
1273 class HeapObjectVisitor {
1274  public:
1275   virtual ~HeapObjectVisitor() = default;
1276   virtual bool Visit(HeapObject object, int size) = 0;
1277 };
1278 
1279 class EvacuateVisitorBase : public HeapObjectVisitor {
1280  public:
1281   void AddObserver(MigrationObserver* observer) {
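    // Once any observer is registered, switch to the migration path that
    // notifies observers on every object move.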
1282     migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1283     observers_.push_back(observer);
1284   }
1285 
1286  protected:
1287   enum MigrationMode { kFast, kObserved };
1288 
1289   using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
1290                                    HeapObject src, int size,
1291                                    AllocationSpace dest);
1292 
1293   template <MigrationMode mode>
1294   static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
1295                                HeapObject src, int size, AllocationSpace dest) {
1296     Address dst_addr = dst.address();
1297     Address src_addr = src.address();
1298     DCHECK(base->heap_->AllowedToBeMigrated(src.map(), src, dest));
1299     DCHECK_NE(dest, LO_SPACE);
1300     DCHECK_NE(dest, CODE_LO_SPACE);
1301     if (dest == OLD_SPACE) {
1302       DCHECK_OBJECT_SIZE(size);
1303       DCHECK(IsAligned(size, kTaggedSize));
1304       base->heap_->CopyBlock(dst_addr, src_addr, size);
1305       if (mode != MigrationMode::kFast)
1306         base->ExecuteMigrationObservers(dest, src, dst, size);
1307       dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
1308       if (V8_UNLIKELY(FLAG_minor_mc)) {
1309         base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
1310       }
1311     } else if (dest == CODE_SPACE) {
1312       DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1313       base->heap_->CopyBlock(dst_addr, src_addr, size);
1314       Code::cast(dst).Relocate(dst_addr - src_addr);
1315       if (mode != MigrationMode::kFast)
1316         base->ExecuteMigrationObservers(dest, src, dst, size);
1317       dst.IterateBodyFast(dst.map(), size, base->record_visitor_);
1318     } else {
1319       DCHECK_OBJECT_SIZE(size);
1320       DCHECK(dest == NEW_SPACE);
1321       base->heap_->CopyBlock(dst_addr, src_addr, size);
1322       if (mode != MigrationMode::kFast)
1323         base->ExecuteMigrationObservers(dest, src, dst, size);
1324     }
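    // Leave a forwarding pointer in the source object's map word so that
    // later pointer updating can find the new location.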
1325     src.set_map_word(MapWord::FromForwardingAddress(dst));
1326   }
1327 
1328   EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
1329                       RecordMigratedSlotVisitor* record_visitor)
1330       : heap_(heap),
1331         local_allocator_(local_allocator),
1332         record_visitor_(record_visitor) {
1333     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1334   }
1335 
1336   inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
1337                                 int size, HeapObject* target_object) {
1338 #ifdef VERIFY_HEAP
1339     if (FLAG_verify_heap && AbortCompactionForTesting(object)) return false;
1340 #endif  // VERIFY_HEAP
1341     AllocationAlignment alignment = HeapObject::RequiredAlignment(object.map());
1342     AllocationResult allocation = local_allocator_->Allocate(
1343         target_space, size, AllocationOrigin::kGC, alignment);
1344     if (allocation.To(target_object)) {
1345       MigrateObject(*target_object, object, size, target_space);
1346       if (target_space == CODE_SPACE)
1347         MemoryChunk::FromHeapObject(*target_object)
1348             ->GetCodeObjectRegistry()
1349             ->RegisterNewlyAllocatedCodeObject((*target_object).address());
1350       return true;
1351     }
1352     return false;
1353   }
1354 
1355   inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
1356                                         HeapObject dst, int size) {
1357     for (MigrationObserver* obs : observers_) {
1358       obs->Move(dest, src, dst, size);
1359     }
1360   }
1361 
1362   inline void MigrateObject(HeapObject dst, HeapObject src, int size,
1363                             AllocationSpace dest) {
1364     migration_function_(this, dst, src, size, dest);
1365   }
1366 
1367 #ifdef VERIFY_HEAP
1368   bool AbortCompactionForTesting(HeapObject object) {
1369     if (FLAG_stress_compaction) {
1370       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1371                              kPageAlignmentMask & ~kObjectAlignmentMask;
1372       if ((object.ptr() & kPageAlignmentMask) == mask) {
1373         Page* page = Page::FromHeapObject(object);
1374         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1375           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1376         } else {
1377           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1378           return true;
1379         }
1380       }
1381     }
1382     return false;
1383   }
1384 #endif  // VERIFY_HEAP
1385 
1386   Heap* heap_;
1387   EvacuationAllocator* local_allocator_;
1388   RecordMigratedSlotVisitor* record_visitor_;
1389   std::vector<MigrationObserver*> observers_;
1390   MigrateFunction migration_function_;
1391 };
1392 
1393 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1394  public:
1395   explicit EvacuateNewSpaceVisitor(
1396       Heap* heap, EvacuationAllocator* local_allocator,
1397       RecordMigratedSlotVisitor* record_visitor,
1398       Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
1399       bool always_promote_young)
1400       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1401         buffer_(LocalAllocationBuffer::InvalidBuffer()),
1402         promoted_size_(0),
1403         semispace_copied_size_(0),
1404         local_pretenuring_feedback_(local_pretenuring_feedback),
1405         is_incremental_marking_(heap->incremental_marking()->IsMarking()),
1406         always_promote_young_(always_promote_young) {}
1407 
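  // Visit decides the fate of a live young-generation object: evacuate it
  // without copying where possible (currently ThinStrings), promote it to old
  // space (always when |always_promote_young_| is set, otherwise only when the
  // heap says it should be promoted), or copy it within new space, falling
  // back to old space if the new-space allocation fails.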
1408   inline bool Visit(HeapObject object, int size) override {
1409     if (TryEvacuateWithoutCopy(object)) return true;
1410     HeapObject target_object;
1411 
1412     if (always_promote_young_) {
1413       heap_->UpdateAllocationSite(object.map(), object,
1414                                   local_pretenuring_feedback_);
1415 
1416       if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1417         heap_->FatalProcessOutOfMemory(
1418             "MarkCompactCollector: young object promotion failed");
1419       }
1420 
1421       promoted_size_ += size;
1422       return true;
1423     }
1424 
1425     if (heap_->ShouldBePromoted(object.address()) &&
1426         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1427       promoted_size_ += size;
1428       return true;
1429     }
1430 
1431     heap_->UpdateAllocationSite(object.map(), object,
1432                                 local_pretenuring_feedback_);
1433 
1434     HeapObject target;
1435     AllocationSpace space = AllocateTargetObject(object, size, &target);
1436     MigrateObject(HeapObject::cast(target), object, size, space);
1437     semispace_copied_size_ += size;
1438     return true;
1439   }
1440 
1441   intptr_t promoted_size() { return promoted_size_; }
1442   intptr_t semispace_copied_size() { return semispace_copied_size_; }
1443 
1444  private:
1445   inline bool TryEvacuateWithoutCopy(HeapObject object) {
1446     if (is_incremental_marking_) return false;
1447 
1448     Map map = object.map();
1449 
1450     // Some objects can be evacuated without creating a copy.
1451     if (map.visitor_id() == kVisitThinString) {
1452       HeapObject actual = ThinString::cast(object).unchecked_actual();
1453       if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1454       object.set_map_word(MapWord::FromForwardingAddress(actual));
1455       return true;
1456     }
1457     // TODO(mlippautz): Handle ConsString.
1458 
1459     return false;
1460   }
1461 
1462   inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
1463                                               HeapObject* target_object) {
1464     AllocationAlignment alignment =
1465         HeapObject::RequiredAlignment(old_object.map());
1466     AllocationSpace space_allocated_in = NEW_SPACE;
1467     AllocationResult allocation = local_allocator_->Allocate(
1468         NEW_SPACE, size, AllocationOrigin::kGC, alignment);
1469     if (allocation.IsRetry()) {
1470       allocation = AllocateInOldSpace(size, alignment);
1471       space_allocated_in = OLD_SPACE;
1472     }
1473     bool ok = allocation.To(target_object);
1474     DCHECK(ok);
1475     USE(ok);
1476     return space_allocated_in;
1477   }
1478 
1479   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1480                                              AllocationAlignment alignment) {
1481     AllocationResult allocation = local_allocator_->Allocate(
1482         OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
1483     if (allocation.IsRetry()) {
1484       heap_->FatalProcessOutOfMemory(
1485           "MarkCompactCollector: semi-space copy, fallback in old gen");
1486     }
1487     return allocation;
1488   }
1489 
1490   LocalAllocationBuffer buffer_;
1491   intptr_t promoted_size_;
1492   intptr_t semispace_copied_size_;
1493   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1494   bool is_incremental_marking_;
1495   bool always_promote_young_;
1496 };
1497 
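// Visitor used when an entire new-space page is promoted in place: in
// NEW_TO_NEW mode the page merely changes semispaces, in NEW_TO_OLD mode it is
// converted into an old-space page and each object's outgoing slots are
// recorded. Objects are not copied, so their addresses stay the same; only the
// page's owner and flags change.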
1498 template <PageEvacuationMode mode>
1499 class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1500  public:
1501   explicit EvacuateNewSpacePageVisitor(
1502       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1503       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1504       : heap_(heap),
1505         record_visitor_(record_visitor),
1506         moved_bytes_(0),
1507         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1508 
1509   static void Move(Page* page) {
1510     switch (mode) {
1511       case NEW_TO_NEW:
1512         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1513         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1514         break;
1515       case NEW_TO_OLD: {
1516         page->heap()->new_space()->from_space().RemovePage(page);
1517         Page* new_page = Page::ConvertNewToOld(page);
1518         DCHECK(!new_page->InYoungGeneration());
1519         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1520         break;
1521       }
1522     }
1523   }
1524 
1525   inline bool Visit(HeapObject object, int size) override {
1526     if (mode == NEW_TO_NEW) {
1527       heap_->UpdateAllocationSite(object.map(), object,
1528                                   local_pretenuring_feedback_);
1529     } else if (mode == NEW_TO_OLD) {
1530       object.IterateBodyFast(record_visitor_);
1531       if (V8_UNLIKELY(FLAG_minor_mc)) {
1532         record_visitor_->MarkArrayBufferExtensionPromoted(object);
1533       }
1534     }
1535     return true;
1536   }
1537 
1538   intptr_t moved_bytes() { return moved_bytes_; }
1539   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1540 
1541  private:
1542   Heap* heap_;
1543   RecordMigratedSlotVisitor* record_visitor_;
1544   intptr_t moved_bytes_;
1545   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1546 };
1547 
1548 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1549  public:
1550   EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
1551                           RecordMigratedSlotVisitor* record_visitor)
1552       : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1553 
1554   inline bool Visit(HeapObject object, int size) override {
1555     HeapObject target_object;
1556     if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
1557                           object, size, &target_object)) {
1558       DCHECK(object.map_word().IsForwardingAddress());
1559       return true;
1560     }
1561     return false;
1562   }
1563 };
1564 
1565 class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1566  public:
1567   explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1568 
1569   inline bool Visit(HeapObject object, int size) override {
1570     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
1571                                       &heap_->ephemeron_remembered_set_);
1572     object.IterateBodyFast(&visitor);
1573     return true;
1574   }
1575 
1576  private:
1577   Heap* heap_;
1578 };
1579 
1580 bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
1581   Object o = *p;
1582   if (!o.IsHeapObject()) return false;
1583   HeapObject heap_object = HeapObject::cast(o);
1584   return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1585       heap_object);
1586 }
1587 
1588 void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1589                                      ObjectVisitor* custom_root_body_visitor) {
1590   // Mark the heap roots including global variables, stack variables,
1591   // etc., and all objects reachable from them.
1592   heap()->IterateRoots(root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
1593 
1594   // Custom marking for top optimized frame.
1595   ProcessTopOptimizedFrame(custom_root_body_visitor);
1596 }
1597 
1598 void MarkCompactCollector::VisitObject(HeapObject obj) {
1599   marking_visitor_->Visit(obj.map(), obj);
1600 }
1601 
1602 void MarkCompactCollector::RevisitObject(HeapObject obj) {
1603   DCHECK(marking_state()->IsBlack(obj));
1604   DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
1605                      MemoryChunk::HAS_PROGRESS_BAR),
1606                  0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
1607   MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
1608   marking_visitor_->Visit(obj.map(), obj);
1609 }
1610 
1611 void MarkCompactCollector::MarkDescriptorArrayFromWriteBarrier(
1612     DescriptorArray descriptors, int number_of_own_descriptors) {
1613   marking_visitor_->MarkDescriptorArrayFromWriteBarrier(
1614       descriptors, number_of_own_descriptors);
1615 }
1616 
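// Ephemerons (e.g. JS WeakMap entries) keep their value alive only while their
// key is alive, so marking has to iterate: newly marked keys can make further
// values, and through them further keys, reachable. The loop below alternates
// wrapper tracing, ephemeron processing and worklist draining until no new
// work is discovered, and gives up after FLAG_ephemeron_fixpoint_iterations
// rounds in favor of the linear algorithm.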
1617 void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
1618   bool work_to_do = true;
1619   int iterations = 0;
1620   int max_iterations = FLAG_ephemeron_fixpoint_iterations;
1621 
1622   while (work_to_do) {
1623     PerformWrapperTracing();
1624 
1625     if (iterations >= max_iterations) {
1626       // Give up fixpoint iteration and switch to linear algorithm.
1627       ProcessEphemeronsLinear();
1628       break;
1629     }
1630 
1631     // Move ephemerons from next_ephemerons into current_ephemerons to
1632     // drain them in this iteration.
1633     weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1634     heap()->concurrent_marking()->set_ephemeron_marked(false);
1635 
1636     {
1637       TRACE_GC(heap()->tracer(),
1638                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1639 
1640       if (FLAG_parallel_marking) {
1641         heap_->concurrent_marking()->RescheduleJobIfNeeded(
1642             TaskPriority::kUserBlocking);
1643       }
1644 
1645       work_to_do = ProcessEphemerons();
1646       FinishConcurrentMarking();
1647     }
1648 
1649     CHECK(weak_objects_.current_ephemerons.IsEmpty());
1650     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1651 
1652     work_to_do = work_to_do || !local_marking_worklists()->IsEmpty() ||
1653                  heap()->concurrent_marking()->ephemeron_marked() ||
1654                  !local_marking_worklists()->IsEmbedderEmpty() ||
1655                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1656     ++iterations;
1657   }
1658 
1659   CHECK(local_marking_worklists()->IsEmpty());
1660   CHECK(weak_objects_.current_ephemerons.IsEmpty());
1661   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1662 }
1663 
1664 bool MarkCompactCollector::ProcessEphemerons() {
1665   Ephemeron ephemeron;
1666   bool ephemeron_marked = false;
1667 
1668   // Drain current_ephemerons and push ephemerons where key and value are still
1669   // unreachable into next_ephemerons.
1670   while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
1671     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
1672       ephemeron_marked = true;
1673     }
1674   }
1675 
1676   // Drain marking worklist and push discovered ephemerons into
1677   // discovered_ephemerons.
1678   DrainMarkingWorklist();
1679 
1680   // Drain discovered_ephemerons (filled while draining the marking worklist
1681   // above) and push ephemerons where key and value are still unreachable into
1682   // next_ephemerons.
1683   while (weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
1684     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
1685       ephemeron_marked = true;
1686     }
1687   }
1688 
1689   // Flush local ephemerons for main task to global pool.
1690   weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThreadTask);
1691   weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
1692 
1693   return ephemeron_marked;
1694 }
1695 
1696 void MarkCompactCollector::ProcessEphemeronsLinear() {
1697   TRACE_GC(heap()->tracer(),
1698            GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
1699   CHECK(heap()->concurrent_marking()->IsStopped());
1700   std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
1701   Ephemeron ephemeron;
1702 
1703   DCHECK(weak_objects_.current_ephemerons.IsEmpty());
1704   weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1705 
1706   while (weak_objects_.current_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
1707     ProcessEphemeron(ephemeron.key, ephemeron.value);
1708 
1709     if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1710       key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1711     }
1712   }
1713 
1714   ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1715   bool work_to_do = true;
1716 
1717   while (work_to_do) {
1718     PerformWrapperTracing();
1719 
1720     ResetNewlyDiscovered();
1721     ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1722 
1723     {
1724       TRACE_GC(heap()->tracer(),
1725                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1726       // Drain marking worklist and push all discovered objects into
1727       // newly_discovered.
1728       ProcessMarkingWorklist<
1729           MarkCompactCollector::MarkingWorklistProcessingMode::
1730               kTrackNewlyDiscoveredObjects>(0);
1731     }
1732 
1733     while (
1734         weak_objects_.discovered_ephemerons.Pop(kMainThreadTask, &ephemeron)) {
1735       ProcessEphemeron(ephemeron.key, ephemeron.value);
1736 
1737       if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1738         key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1739       }
1740     }
1741 
1742     if (ephemeron_marking_.newly_discovered_overflowed) {
1743       // If newly_discovered was overflowed just visit all ephemerons in
1744       // next_ephemerons.
1745       weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
1746         if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
1747             non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
1748           local_marking_worklists()->Push(ephemeron.value);
1749         }
1750       });
1751 
1752     } else {
1753       // This is the good case: newly_discovered stores all discovered
1754       // objects. Now use key_to_values to see if discovered objects keep more
1755       // objects alive due to ephemeron semantics.
1756       for (HeapObject object : ephemeron_marking_.newly_discovered) {
1757         auto range = key_to_values.equal_range(object);
1758         for (auto it = range.first; it != range.second; ++it) {
1759           HeapObject value = it->second;
1760           MarkObject(object, value);
1761         }
1762       }
1763     }
1764 
1765     // Do NOT drain marking worklist here, otherwise the current checks
1766     // for work_to_do are not sufficient for determining if another iteration
1767     // is necessary.
1768 
1769     work_to_do = !local_marking_worklists()->IsEmpty() ||
1770                  !local_marking_worklists()->IsEmbedderEmpty() ||
1771                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1772     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1773   }
1774 
1775   ResetNewlyDiscovered();
1776   ephemeron_marking_.newly_discovered.shrink_to_fit();
1777 
1778   CHECK(local_marking_worklists()->IsEmpty());
1779 }
1780 
1781 void MarkCompactCollector::PerformWrapperTracing() {
1782   if (heap_->local_embedder_heap_tracer()->InUse()) {
1783     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
1784     {
1785       LocalEmbedderHeapTracer::ProcessingScope scope(
1786           heap_->local_embedder_heap_tracer());
1787       HeapObject object;
1788       while (local_marking_worklists()->PopEmbedder(&object)) {
1789         scope.TracePossibleWrapper(JSObject::cast(object));
1790       }
1791     }
1792     heap_->local_embedder_heap_tracer()->Trace(
1793         std::numeric_limits<double>::infinity());
1794   }
1795 }
1796 
1797 void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
1798 
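// Drains the marking worklists, visiting each popped object with the marking
// visitor. Fillers left behind by left-trimming are skipped. In per-context
// mode the visited bytes are attributed to the inferred native context (used
// for per-context memory measurement). A |bytes_to_process| of zero means
// "drain completely"; otherwise processing stops once the budget is reached.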
1799 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
1800 size_t MarkCompactCollector::ProcessMarkingWorklist(size_t bytes_to_process) {
1801   HeapObject object;
1802   size_t bytes_processed = 0;
1803   bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
1804   Isolate* isolate = heap()->isolate();
1805   while (local_marking_worklists()->Pop(&object) ||
1806          local_marking_worklists()->PopOnHold(&object)) {
1807     // Left trimming may result in grey or black filler objects on the marking
1808     // worklist. Ignore these objects.
1809     if (object.IsFreeSpaceOrFiller()) {
1810       // Due to copying mark bits and the fact that grey and black have their
1811       // first bit set, one word fillers are always black.
1812       DCHECK_IMPLIES(
1813           object.map() == ReadOnlyRoots(heap()).one_pointer_filler_map(),
1814           marking_state()->IsBlack(object));
1815       // Other fillers may be black or grey depending on the color of the object
1816       // that was trimmed.
1817       DCHECK_IMPLIES(
1818           object.map() != ReadOnlyRoots(heap()).one_pointer_filler_map(),
1819           marking_state()->IsBlackOrGrey(object));
1820       continue;
1821     }
1822     DCHECK(object.IsHeapObject());
1823     DCHECK(heap()->Contains(object));
1824     DCHECK(!(marking_state()->IsWhite(object)));
1825     if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
1826                     kTrackNewlyDiscoveredObjects) {
1827       AddNewlyDiscovered(object);
1828     }
1829     Map map = object.map(isolate);
1830     if (is_per_context_mode) {
1831       Address context;
1832       if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
1833         local_marking_worklists()->SwitchToContext(context);
1834       }
1835     }
1836     size_t visited_size = marking_visitor_->Visit(map, object);
1837     if (is_per_context_mode) {
1838       native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
1839                                           map, object, visited_size);
1840     }
1841     bytes_processed += visited_size;
1842     if (bytes_to_process && bytes_processed >= bytes_to_process) {
1843       break;
1844     }
1845   }
1846   return bytes_processed;
1847 }
1848 
1849 // Generate definitions for use in other files.
1850 template size_t MarkCompactCollector::ProcessMarkingWorklist<
1851     MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
1852     size_t bytes_to_process);
1853 template size_t MarkCompactCollector::ProcessMarkingWorklist<
1854     MarkCompactCollector::MarkingWorklistProcessingMode::
1855         kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
1856 
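// Processes a single ephemeron: if the key is already marked, the value is
// marked grey and pushed onto the worklist (returning true to signal new
// work); if the value is still white, the pair is deferred to next_ephemerons
// for a later round. This implements the WeakMap rule that an entry's value is
// kept alive only while its key is alive.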
1857 bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
1858   if (marking_state()->IsBlackOrGrey(key)) {
1859     if (marking_state()->WhiteToGrey(value)) {
1860       local_marking_worklists()->Push(value);
1861       return true;
1862     }
1863 
1864   } else if (marking_state()->IsWhite(value)) {
1865     weak_objects_.next_ephemerons.Push(kMainThreadTask, Ephemeron{key, value});
1866   }
1867 
1868   return false;
1869 }
1870 
1871 void MarkCompactCollector::ProcessEphemeronMarking() {
1872   DCHECK(local_marking_worklists()->IsEmpty());
1873 
1874   // Incremental marking might leave ephemerons in the main task's local
1875   // buffer; flush them into the global pool.
1876   weak_objects_.next_ephemerons.FlushToGlobal(kMainThreadTask);
1877 
1878   ProcessEphemeronsUntilFixpoint();
1879 
1880   CHECK(local_marking_worklists()->IsEmpty());
1881   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
1882 }
1883 
1884 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1885   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1886        !it.done(); it.Advance()) {
1887     if (it.frame()->type() == StackFrame::INTERPRETED) {
1888       return;
1889     }
1890     if (it.frame()->type() == StackFrame::OPTIMIZED) {
1891       Code code = it.frame()->LookupCode();
1892       if (!code.CanDeoptAt(it.frame()->pc())) {
1893         Code::BodyDescriptor::IterateBody(code.map(), code, visitor);
1894       }
1895       return;
1896     }
1897   }
1898 }
1899 
1900 void MarkCompactCollector::RecordObjectStats() {
1901   if (V8_UNLIKELY(TracingFlags::is_gc_stats_enabled())) {
1902     heap()->CreateObjectStats();
1903     ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
1904                                    heap()->dead_object_stats_.get());
1905     collector.Collect();
1906     if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
1907                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
1908       std::stringstream live, dead;
1909       heap()->live_object_stats_->Dump(live);
1910       heap()->dead_object_stats_->Dump(dead);
1911       TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
1912                            "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
1913                            "live", TRACE_STR_COPY(live.str().c_str()), "dead",
1914                            TRACE_STR_COPY(dead.str().c_str()));
1915     }
1916     if (FLAG_trace_gc_object_stats) {
1917       heap()->live_object_stats_->PrintJSON("live");
1918       heap()->dead_object_stats_->PrintJSON("dead");
1919     }
1920     heap()->live_object_stats_->CheckpointObjectStats();
1921     heap()->dead_object_stats_->ClearObjectStats();
1922   }
1923 }
1924 
1925 void MarkCompactCollector::MarkLiveObjects() {
1926   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
1927   // The recursive GC marker detects when it is nearing stack overflow,
1928   // and switches to a different marking system.  JS interrupts interfere
1929   // with the C stack limit check.
1930   PostponeInterruptsScope postpone(isolate());
1931 
1932   {
1933     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1934     IncrementalMarking* incremental_marking = heap_->incremental_marking();
1935     if (was_marked_incrementally_) {
1936       incremental_marking->Finalize();
1937       MarkingBarrier::PublishAll(heap());
1938     } else {
1939       CHECK(incremental_marking->IsStopped());
1940     }
1941   }
1942 
1943 #ifdef DEBUG
1944   DCHECK(state_ == PREPARE_GC);
1945   state_ = MARK_LIVE_OBJECTS;
1946 #endif
1947 
1948   heap_->local_embedder_heap_tracer()->EnterFinalPause();
1949 
1950   RootMarkingVisitor root_visitor(this);
1951 
1952   {
1953     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
1954     CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1955     MarkRoots(&root_visitor, &custom_root_body_visitor);
1956   }
1957 
1958   {
1959     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
1960     if (FLAG_parallel_marking) {
1961       heap_->concurrent_marking()->RescheduleJobIfNeeded(
1962           TaskPriority::kUserBlocking);
1963     }
1964     DrainMarkingWorklist();
1965 
1966     FinishConcurrentMarking();
1967     DrainMarkingWorklist();
1968   }
1969 
1970   {
1971     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
1972 
1973     DCHECK(local_marking_worklists()->IsEmpty());
1974 
1975     // Mark objects reachable through the embedder heap. This phase is
1976     // opportunistic as it may not discover graphs that are only reachable
1977     // through ephemerons.
1978     {
1979       TRACE_GC(heap()->tracer(),
1980                GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
1981       do {
1982         // PerformWrapperTracing() also empties the work items collected by
1983         // concurrent markers. As a result this call needs to happen at least
1984         // once.
1985         PerformWrapperTracing();
1986         DrainMarkingWorklist();
1987       } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
1988                !local_marking_worklists()->IsEmbedderEmpty());
1989       DCHECK(local_marking_worklists()->IsEmbedderEmpty());
1990       DCHECK(local_marking_worklists()->IsEmpty());
1991     }
1992 
1993     // The objects reachable from the roots are marked, yet unreachable objects
1994     // are unmarked. Mark objects reachable due to embedder heap tracing or
1995     // harmony weak maps.
1996     {
1997       TRACE_GC(heap()->tracer(),
1998                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
1999       ProcessEphemeronMarking();
2000       DCHECK(local_marking_worklists()->IsEmpty());
2001     }
2002 
2003     // The objects reachable from the roots, weak maps, and embedder heap
2004     // tracing are marked. Objects pointed to only by weak global handles cannot
2005     // be immediately reclaimed. Instead, we have to mark them as pending and
2006     // mark objects reachable from them.
2007     //
2008     // First we identify nonlive weak handles and mark them as pending
2009     // destruction.
2010     {
2011       TRACE_GC(heap()->tracer(),
2012                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2013       heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
2014           &IsUnmarkedHeapObject);
2015       DrainMarkingWorklist();
2016     }
2017 
2018     // Process finalizers, effectively keeping them alive until the next
2019     // garbage collection.
2020     {
2021       TRACE_GC(heap()->tracer(),
2022                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2023       heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
2024           &root_visitor);
2025       DrainMarkingWorklist();
2026     }
2027 
2028     // Repeat ephemeron processing from the newly marked objects.
2029     {
2030       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2031       ProcessEphemeronMarking();
2032       DCHECK(local_marking_worklists()->IsEmbedderEmpty());
2033       DCHECK(local_marking_worklists()->IsEmpty());
2034     }
2035 
2036     {
2037       heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
2038           &IsUnmarkedHeapObject);
2039     }
2040   }
2041   if (was_marked_incrementally_) {
2042     MarkingBarrier::DeactivateAll(heap());
2043   }
2044 
2045   epoch_++;
2046 }
2047 
2048 void MarkCompactCollector::ClearNonLiveReferences() {
2049   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2050 
2051   {
2052     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2053 
2054     // Prune the string table removing all strings only pointed to by the
2055     // string table.  Cannot use string_table() here because the string
2056     // table is marked.
2057     StringTable* string_table = isolate()->string_table();
2058     InternalizedStringTableCleaner internalized_visitor(heap());
2059     string_table->DropOldData();
2060     string_table->IterateElements(&internalized_visitor);
2061     string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
2062 
2063     ExternalStringTableCleaner external_visitor(heap());
2064     heap()->external_string_table_.IterateAll(&external_visitor);
2065     heap()->external_string_table_.CleanUpAll();
2066   }
2067 
2068   {
2069     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
2070     ClearOldBytecodeCandidates();
2071   }
2072 
2073   {
2074     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
2075     ClearFlushedJsFunctions();
2076   }
2077 
2078   {
2079     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2080     // Process the weak references.
2081     MarkCompactWeakObjectRetainer mark_compact_object_retainer(
2082         non_atomic_marking_state());
2083     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2084   }
2085 
2086   {
2087     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2088     // ClearFullMapTransitions must be called before weak references are
2089     // cleared.
2090     ClearFullMapTransitions();
2091   }
2092   {
2093     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2094     ClearWeakReferences();
2095     ClearWeakCollections();
2096     ClearJSWeakRefs();
2097   }
2098 
2099   MarkDependentCodeForDeoptimization();
2100 
2101   DCHECK(weak_objects_.transition_arrays.IsEmpty());
2102   DCHECK(weak_objects_.weak_references.IsEmpty());
2103   DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
2104   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
2105   DCHECK(weak_objects_.weak_cells.IsEmpty());
2106   DCHECK(weak_objects_.bytecode_flushing_candidates.IsEmpty());
2107   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
2108 }
2109 
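// During marking, (object, code) pairs are recorded for heap objects that are
// weakly embedded in code. If such an object died in this cycle, the code is
// marked for deoptimization and its embedded object fields are cleared.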
2110 void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
2111   std::pair<HeapObject, Code> weak_object_in_code;
2112   while (weak_objects_.weak_objects_in_code.Pop(kMainThreadTask,
2113                                                 &weak_object_in_code)) {
2114     HeapObject object = weak_object_in_code.first;
2115     Code code = weak_object_in_code.second;
2116     if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
2117         !code.embedded_objects_cleared()) {
2118       if (!code.marked_for_deoptimization()) {
2119         code.SetMarkedForDeoptimization("weak objects");
2120         have_code_to_deoptimize_ = true;
2121       }
2122       code.ClearEmbeddedObjects(heap_);
2123       DCHECK(code.embedded_objects_cleared());
2124     }
2125   }
2126 }
2127 
2128 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
2129   DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
2130   Object potential_parent = dead_target.constructor_or_backpointer();
2131   if (potential_parent.IsMap()) {
2132     Map parent = Map::cast(potential_parent);
2133     DisallowHeapAllocation no_gc_obviously;
2134     if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
2135         TransitionsAccessor(isolate(), parent, &no_gc_obviously)
2136             .HasSimpleTransitionTo(dead_target)) {
2137       ClearPotentialSimpleMapTransition(parent, dead_target);
2138     }
2139   }
2140 }
2141 
2142 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
2143                                                              Map dead_target) {
2144   DCHECK(!map.is_prototype_map());
2145   DCHECK(!dead_target.is_prototype_map());
2146   DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
2147   // Take ownership of the descriptor array.
2148   int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2149   DescriptorArray descriptors = map.instance_descriptors(kRelaxedLoad);
2150   if (descriptors == dead_target.instance_descriptors(kRelaxedLoad) &&
2151       number_of_own_descriptors > 0) {
2152     TrimDescriptorArray(map, descriptors);
2153     DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2154   }
2155 }
2156 
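// Bytecode flushing turns a SharedFunctionInfo whose BytecodeArray was not
// marked during this cycle back into an "uncompiled" function. The
// BytecodeArray's memory is reused in place: its map is swapped to
// UncompiledDataWithoutPreparseData, stale remembered-set entries covering the
// range are removed, and any leftover space is turned into a filler object.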
2157 void MarkCompactCollector::FlushBytecodeFromSFI(
2158     SharedFunctionInfo shared_info) {
2159   DCHECK(shared_info.HasBytecodeArray());
2160 
2161   // Retain objects required for uncompiled data.
2162   String inferred_name = shared_info.inferred_name();
2163   int start_position = shared_info.StartPosition();
2164   int end_position = shared_info.EndPosition();
2165 
2166   shared_info.DiscardCompiledMetadata(
2167       isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
2168         RecordSlot(object, slot, target);
2169       });
2170 
2171   // The size of the bytecode array should always be larger than an
2172   // UncompiledData object.
2173   STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
2174                 UncompiledDataWithoutPreparseData::kSize);
2175 
2176   // Replace bytecode array with an uncompiled data array.
2177   HeapObject compiled_data = shared_info.GetBytecodeArray();
2178   Address compiled_data_start = compiled_data.address();
2179   int compiled_data_size = compiled_data.Size();
2180   MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
2181 
2182   // Clear any recorded slots for the compiled data as being invalid.
2183   DCHECK_NULL(chunk->sweeping_slot_set());
2184   RememberedSet<OLD_TO_NEW>::RemoveRange(
2185       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2186       SlotSet::FREE_EMPTY_BUCKETS);
2187   RememberedSet<OLD_TO_OLD>::RemoveRange(
2188       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2189       SlotSet::FREE_EMPTY_BUCKETS);
2190 
2191   // Swap the map, using set_map_after_allocation to avoid verify heap checks
2192   // which are not necessary since we are doing this during the GC atomic pause.
2193   compiled_data.set_map_after_allocation(
2194       ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
2195       SKIP_WRITE_BARRIER);
2196 
2197   // Create a filler object for any left over space in the bytecode array.
2198   if (!heap()->IsLargeObject(compiled_data)) {
2199     heap()->CreateFillerObjectAt(
2200         compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
2201         compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
2202         ClearRecordedSlots::kNo);
2203   }
2204 
2205   // Initialize the uncompiled data.
2206   UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
2207   uncompiled_data.InitAfterBytecodeFlush(
2208       inferred_name, start_position, end_position,
2209       [](HeapObject object, ObjectSlot slot, HeapObject target) {
2210         RecordSlot(object, slot, target);
2211       });
2212 
2213   // Mark the uncompiled data as black, and ensure all fields have already been
2214   // marked.
2215   DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
2216   non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
2217 
2218   // Use the raw function data setter to avoid validity checks, since we're
2219   // performing the unusual task of decompiling.
2220   shared_info.set_function_data(uncompiled_data, kReleaseStore);
2221   DCHECK(!shared_info.is_compiled());
2222 }
2223 
2224 void MarkCompactCollector::ClearOldBytecodeCandidates() {
2225   DCHECK(FLAG_flush_bytecode ||
2226          weak_objects_.bytecode_flushing_candidates.IsEmpty());
2227   SharedFunctionInfo flushing_candidate;
2228   while (weak_objects_.bytecode_flushing_candidates.Pop(kMainThreadTask,
2229                                                         &flushing_candidate)) {
2230     // If the BytecodeArray is dead, flush it, which will replace the field with
2231     // an uncompiled data object.
2232     if (!non_atomic_marking_state()->IsBlackOrGrey(
2233             flushing_candidate.GetBytecodeArray())) {
2234       FlushBytecodeFromSFI(flushing_candidate);
2235     }
2236 
2237     // Now record the slot, which has either been updated to an uncompiled data,
2238     // or is the BytecodeArray which is still alive.
2239     ObjectSlot slot =
2240         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
2241     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
2242   }
2243 }
2244 
2245 void MarkCompactCollector::ClearFlushedJsFunctions() {
2246   DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
2247   JSFunction flushed_js_function;
2248   while (weak_objects_.flushed_js_functions.Pop(kMainThreadTask,
2249                                                 &flushed_js_function)) {
2250     auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2251                                      Object target) {
2252       RecordSlot(object, slot, HeapObject::cast(target));
2253     };
2254     flushed_js_function.ResetIfBytecodeFlushed(gc_notify_updated_slot);
2255   }
2256 }
2257 
2258 void MarkCompactCollector::ClearFullMapTransitions() {
2259   TransitionArray array;
2260   while (weak_objects_.transition_arrays.Pop(kMainThreadTask, &array)) {
2261     int num_transitions = array.number_of_entries();
2262     if (num_transitions > 0) {
2263       Map map;
2264       // The array might contain "undefined" elements because it's not yet
2265       // filled. Allow it.
2266       if (array.GetTargetIfExists(0, isolate(), &map)) {
2267         DCHECK(!map.is_null());  // Weak pointers aren't cleared yet.
2268         Object constructor_or_backpointer = map.constructor_or_backpointer();
2269         if (constructor_or_backpointer.IsSmi()) {
2270           DCHECK(isolate()->has_active_deserializer());
2271           DCHECK_EQ(constructor_or_backpointer,
2272                     Deserializer::uninitialized_field_value());
2273           continue;
2274         }
2275         Map parent = Map::cast(map.constructor_or_backpointer());
2276         bool parent_is_alive =
2277             non_atomic_marking_state()->IsBlackOrGrey(parent);
2278         DescriptorArray descriptors =
2279             parent_is_alive ? parent.instance_descriptors(kRelaxedLoad)
2280                             : DescriptorArray();
2281         bool descriptors_owner_died =
2282             CompactTransitionArray(parent, array, descriptors);
2283         if (descriptors_owner_died) {
2284           TrimDescriptorArray(parent, descriptors);
2285         }
2286       }
2287     }
2288   }
2289 }
2290 
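// Compacts a map's transition array by sliding live transitions to the left,
// dropping entries whose target map is dead, and re-recording slots for the
// surviving keys and targets before right-trimming the array. Returns whether
// a dead target shared the given descriptor array, in which case the caller
// trims the descriptors as well.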
2291 bool MarkCompactCollector::CompactTransitionArray(Map map,
2292                                                   TransitionArray transitions,
2293                                                   DescriptorArray descriptors) {
2294   DCHECK(!map.is_prototype_map());
2295   int num_transitions = transitions.number_of_entries();
2296   bool descriptors_owner_died = false;
2297   int transition_index = 0;
2298   // Compact all live transitions to the left.
2299   for (int i = 0; i < num_transitions; ++i) {
2300     Map target = transitions.GetTarget(i);
2301     DCHECK_EQ(target.constructor_or_backpointer(), map);
2302     if (non_atomic_marking_state()->IsWhite(target)) {
2303       if (!descriptors.is_null() &&
2304           target.instance_descriptors(kRelaxedLoad) == descriptors) {
2305         DCHECK(!target.is_prototype_map());
2306         descriptors_owner_died = true;
2307       }
2308     } else {
2309       if (i != transition_index) {
2310         Name key = transitions.GetKey(i);
2311         transitions.SetKey(transition_index, key);
2312         HeapObjectSlot key_slot = transitions.GetKeySlot(transition_index);
2313         RecordSlot(transitions, key_slot, key);
2314         MaybeObject raw_target = transitions.GetRawTarget(i);
2315         transitions.SetRawTarget(transition_index, raw_target);
2316         HeapObjectSlot target_slot =
2317             transitions.GetTargetSlot(transition_index);
2318         RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
2319       }
2320       transition_index++;
2321     }
2322   }
2323   // If there are no transitions to be cleared, return.
2324   if (transition_index == num_transitions) {
2325     DCHECK(!descriptors_owner_died);
2326     return false;
2327   }
2328   // Note that we never eliminate a transition array, though we might right-trim
2329   // such that number_of_transitions() == 0. If this assumption changes,
2330   // TransitionArray::Insert() will need to deal with the case that a transition
2331   // array disappeared during GC.
2332   int trim = transitions.Capacity() - transition_index;
2333   if (trim > 0) {
2334     heap_->RightTrimWeakFixedArray(transitions,
2335                                    trim * TransitionArray::kEntrySize);
2336     transitions.SetNumberOfTransitions(transition_index);
2337   }
2338   return descriptors_owner_died;
2339 }
2340 
2341 void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
2342                                                     int descriptors_to_trim) {
2343   int old_nof_all_descriptors = array.number_of_all_descriptors();
2344   int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2345   DCHECK_LT(0, descriptors_to_trim);
2346   DCHECK_LE(0, new_nof_all_descriptors);
2347   Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
2348   Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
2349   MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
2350   DCHECK_NULL(chunk->sweeping_slot_set());
2351   RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
2352                                          SlotSet::FREE_EMPTY_BUCKETS);
2353   RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
2354                                          SlotSet::FREE_EMPTY_BUCKETS);
2355   heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2356                                ClearRecordedSlots::kNo);
2357   array.set_number_of_all_descriptors(new_nof_all_descriptors);
2358 }
2359 
2360 void MarkCompactCollector::TrimDescriptorArray(Map map,
2361                                                DescriptorArray descriptors) {
2362   int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2363   if (number_of_own_descriptors == 0) {
2364     DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2365     return;
2366   }
2367   // TODO(ulan): Trim only if slack is greater than some percentage threshold.
2368   int to_trim =
2369       descriptors.number_of_all_descriptors() - number_of_own_descriptors;
2370   if (to_trim > 0) {
2371     descriptors.set_number_of_descriptors(number_of_own_descriptors);
2372     RightTrimDescriptorArray(descriptors, to_trim);
2373 
2374     TrimEnumCache(map, descriptors);
2375     descriptors.Sort();
2376 
2377     if (FLAG_unbox_double_fields) {
2378       LayoutDescriptor layout_descriptor = map.layout_descriptor(kAcquireLoad);
2379       layout_descriptor = layout_descriptor.Trim(heap_, map, descriptors,
2380                                                  number_of_own_descriptors);
2381       SLOW_DCHECK(layout_descriptor.IsConsistentWithMap(map, true));
2382     }
2383   }
2384   DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2385   map.set_owns_descriptors(true);
2386 }
2387 
2388 void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
2389   int live_enum = map.EnumLength();
2390   if (live_enum == kInvalidEnumCacheSentinel) {
2391     live_enum = map.NumberOfEnumerableProperties();
2392   }
2393   if (live_enum == 0) return descriptors.ClearEnumCache();
2394   EnumCache enum_cache = descriptors.enum_cache();
2395 
2396   FixedArray keys = enum_cache.keys();
2397   int to_trim = keys.length() - live_enum;
2398   if (to_trim <= 0) return;
2399   heap_->RightTrimFixedArray(keys, to_trim);
2400 
2401   FixedArray indices = enum_cache.indices();
2402   to_trim = indices.length() - live_enum;
2403   if (to_trim <= 0) return;
2404   heap_->RightTrimFixedArray(indices, to_trim);
2405 }
2406 
2407 void MarkCompactCollector::ClearWeakCollections() {
2408   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2409   EphemeronHashTable table;
2410 
2411   while (weak_objects_.ephemeron_hash_tables.Pop(kMainThreadTask, &table)) {
2412     for (InternalIndex i : table.IterateEntries()) {
2413       HeapObject key = HeapObject::cast(table.KeyAt(i));
2414 #ifdef VERIFY_HEAP
2415       if (FLAG_verify_heap) {
2416         Object value = table.ValueAt(i);
2417         if (value.IsHeapObject()) {
2418           CHECK_IMPLIES(non_atomic_marking_state()->IsBlackOrGrey(key),
2419                         non_atomic_marking_state()->IsBlackOrGrey(
2420                             HeapObject::cast(value)));
2421         }
2422       }
2423 #endif
2424       if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2425         table.RemoveEntry(i);
2426       }
2427     }
2428   }
2429   for (auto it = heap_->ephemeron_remembered_set_.begin();
2430        it != heap_->ephemeron_remembered_set_.end();) {
2431     if (!non_atomic_marking_state()->IsBlackOrGrey(it->first)) {
2432       it = heap_->ephemeron_remembered_set_.erase(it);
2433     } else {
2434       ++it;
2435     }
2436   }
2437 }
2438 
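// Resolves the weak references recorded during marking: if the referenced
// object survived, the slot is re-recorded for the compaction phase;
// otherwise the slot is overwritten with the cleared-weak-reference sentinel,
// and a dead map target additionally has its simple transition pruned from
// the parent map.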
2439 void MarkCompactCollector::ClearWeakReferences() {
2440   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2441   std::pair<HeapObject, HeapObjectSlot> slot;
2442   HeapObjectReference cleared_weak_ref =
2443       HeapObjectReference::ClearedValue(isolate());
2444   while (weak_objects_.weak_references.Pop(kMainThreadTask, &slot)) {
2445     HeapObject value;
2446     // The slot could have been overwritten, so we have to treat it
2447     // as MaybeObjectSlot.
2448     MaybeObjectSlot location(slot.second);
2449     if ((*location)->GetHeapObjectIfWeak(&value)) {
2450       DCHECK(!value.IsCell());
2451       if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2452         // The value of the weak reference is alive.
2453         RecordSlot(slot.first, HeapObjectSlot(location), value);
2454       } else {
2455         if (value.IsMap()) {
2456           // The map is non-live.
2457           ClearPotentialSimpleMapTransition(Map::cast(value));
2458         }
2459         location.store(cleared_weak_ref);
2460       }
2461     }
2462   }
2463 }
2464 
2465 void MarkCompactCollector::ClearJSWeakRefs() {
2466   if (!FLAG_harmony_weak_refs) {
2467     return;
2468   }
2469   JSWeakRef weak_ref;
2470   while (weak_objects_.js_weak_refs.Pop(kMainThreadTask, &weak_ref)) {
2471     HeapObject target = HeapObject::cast(weak_ref.target());
2472     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2473       weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
2474     } else {
2475       // The value of the JSWeakRef is alive.
2476       ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
2477       RecordSlot(weak_ref, slot, target);
2478     }
2479   }
2480   WeakCell weak_cell;
2481   while (weak_objects_.weak_cells.Pop(kMainThreadTask, &weak_cell)) {
2482     auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2483                                      Object target) {
2484       if (target.IsHeapObject()) {
2485         RecordSlot(object, slot, HeapObject::cast(target));
2486       }
2487     };
2488     HeapObject target = HeapObject::cast(weak_cell.target());
2489     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2490       DCHECK(!target.IsUndefined());
2491       // The value of the WeakCell is dead.
2492       JSFinalizationRegistry finalization_registry =
2493           JSFinalizationRegistry::cast(weak_cell.finalization_registry());
2494       if (!finalization_registry.scheduled_for_cleanup()) {
2495         heap()->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
2496                                                    gc_notify_updated_slot);
2497       }
2498       // We're modifying the pointers in WeakCell and JSFinalizationRegistry
2499       // during GC; thus we need to record the slots it writes. The normal write
2500       // barrier is not enough, since it's disabled before GC.
2501       weak_cell.Nullify(isolate(), gc_notify_updated_slot);
2502       DCHECK(finalization_registry.NeedsCleanup());
2503       DCHECK(finalization_registry.scheduled_for_cleanup());
2504     } else {
2505       // The value of the WeakCell is alive.
2506       ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
2507       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
2508     }
2509 
2510     HeapObject unregister_token =
2511         HeapObject::cast(weak_cell.unregister_token());
2512     if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
2513       // The unregister token is dead. Remove any corresponding entries in the
2514       // key map. Multiple WeakCell with the same token will have all their
2515       // unregister_token field set to undefined when processing the first
2516       // WeakCell. Like above, we're modifying pointers during GC, so record the
2517       // slots.
2518       HeapObject undefined = ReadOnlyRoots(isolate()).undefined_value();
2519       JSFinalizationRegistry finalization_registry =
2520           JSFinalizationRegistry::cast(weak_cell.finalization_registry());
2521       finalization_registry.RemoveUnregisterToken(
2522           JSReceiver::cast(unregister_token), isolate(),
2523           [undefined](WeakCell matched_cell) {
2524             matched_cell.set_unregister_token(undefined);
2525           },
2526           gc_notify_updated_slot);
2527       // The following is necessary because in the case that weak_cell has
2528       // already been popped and removed from the FinalizationRegistry, the call
2529       // to JSFinalizationRegistry::RemoveUnregisterToken above will not find
2530       // weak_cell itself to clear its unregister token.
2531       weak_cell.set_unregister_token(undefined);
2532     } else {
2533       // The unregister_token is alive.
2534       ObjectSlot slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
2535       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
2536     }
2537   }
2538   heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
2539 }
2540 
2541 void MarkCompactCollector::AbortWeakObjects() {
2542   weak_objects_.transition_arrays.Clear();
2543   weak_objects_.ephemeron_hash_tables.Clear();
2544   weak_objects_.current_ephemerons.Clear();
2545   weak_objects_.next_ephemerons.Clear();
2546   weak_objects_.discovered_ephemerons.Clear();
2547   weak_objects_.weak_references.Clear();
2548   weak_objects_.weak_objects_in_code.Clear();
2549   weak_objects_.js_weak_refs.Clear();
2550   weak_objects_.weak_cells.Clear();
2551   weak_objects_.bytecode_flushing_candidates.Clear();
2552   weak_objects_.flushed_js_functions.Clear();
2553 }
2554 
2555 bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
2556   return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
2557 }
2558 
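// Pointers embedded in code via relocation info cannot be updated through
// ordinary object slots. When such a pointer targets an evacuation candidate,
// the slot is described as a (page, slot type, offset) triple so that
// RecordRelocSlot below can insert it into the OLD_TO_OLD typed remembered set
// for updating after evacuation.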
2559 MarkCompactCollector::RecordRelocSlotInfo
2560 MarkCompactCollector::PrepareRecordRelocSlot(Code host, RelocInfo* rinfo,
2561                                              HeapObject target) {
2562   RecordRelocSlotInfo result;
2563   result.should_record = false;
2564   Page* target_page = Page::FromHeapObject(target);
2565   Page* source_page = Page::FromHeapObject(host);
2566   if (target_page->IsEvacuationCandidate() &&
2567       (rinfo->host().is_null() ||
2568        !source_page->ShouldSkipEvacuationSlotRecording())) {
2569     RelocInfo::Mode rmode = rinfo->rmode();
2570     Address addr = rinfo->pc();
2571     SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2572     if (rinfo->IsInConstantPool()) {
2573       addr = rinfo->constant_pool_entry_address();
2574       if (RelocInfo::IsCodeTargetMode(rmode)) {
2575         slot_type = CODE_ENTRY_SLOT;
2576       } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
2577         slot_type = COMPRESSED_OBJECT_SLOT;
2578       } else {
2579         DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
2580         slot_type = FULL_OBJECT_SLOT;
2581       }
2582     }
2583     uintptr_t offset = addr - source_page->address();
2584     DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
2585     result.should_record = true;
2586     result.memory_chunk = source_page;
2587     result.slot_type = slot_type;
2588     result.offset = static_cast<uint32_t>(offset);
2589   }
2590   return result;
2591 }
2592 
2593 void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
2594                                            HeapObject target) {
2595   RecordRelocSlotInfo info = PrepareRecordRelocSlot(host, rinfo, target);
2596   if (info.should_record) {
2597     RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
2598                                            info.offset);
2599   }
2600 }
2601 
2602 namespace {
2603 
2604 // The deliberately missing specialization MakeSlotValue<FullObjectSlot, WEAK>()
2605 // turns an attempt to store a weak reference into a strong-only slot into a compilation error.
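// For instance, an UpdateSlot() instantiation that tries to write a WEAK
// reference through a plain ObjectSlot finds no matching specialization below
// and therefore fails to compile, which is the intended safety net.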
2606 template <typename TSlot, HeapObjectReferenceType reference_type>
2607 typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
2608 
2609 template <>
2610 Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
2611     HeapObject heap_object) {
2612   return heap_object;
2613 }
2614 
2615 template <>
2616 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2617     HeapObject heap_object) {
2618   return HeapObjectReference::Strong(heap_object);
2619 }
2620 
2621 template <>
2622 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
2623     HeapObject heap_object) {
2624   return HeapObjectReference::Weak(heap_object);
2625 }
2626 
2627 template <>
2628 Object MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::STRONG>(
2629     HeapObject heap_object) {
2630   return heap_object;
2631 }
2632 
2633 #ifdef V8_COMPRESS_POINTERS
2634 template <>
2635 Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
2636     HeapObject heap_object) {
2637   return heap_object;
2638 }
2639 
2640 template <>
2641 MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
2642     HeapObject heap_object) {
2643   return HeapObjectReference::Strong(heap_object);
2644 }
2645 
2646 // The following specialization
2647 //   MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
2648 // is not used.
2649 #endif
2650 
2651 template <AccessMode access_mode, HeapObjectReferenceType reference_type,
2652           typename TSlot>
2653 static inline SlotCallbackResult UpdateSlot(TSlot slot,
2654                                             typename TSlot::TObject old,
2655                                             HeapObject heap_obj) {
2656   static_assert(std::is_same<TSlot, FullObjectSlot>::value ||
2657                     std::is_same<TSlot, ObjectSlot>::value ||
2658                     std::is_same<TSlot, FullMaybeObjectSlot>::value ||
2659                     std::is_same<TSlot, MaybeObjectSlot>::value ||
2660                     std::is_same<TSlot, OffHeapObjectSlot>::value,
2661                 "Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
2662                 "expected here");
2663   MapWord map_word = heap_obj.map_word();
2664   if (map_word.IsForwardingAddress()) {
2665     DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
2666                    MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2667                        Page::FromHeapObject(heap_obj)->IsFlagSet(
2668                            Page::COMPACTION_WAS_ABORTED));
2669     typename TSlot::TObject target =
2670         MakeSlotValue<TSlot, reference_type>(map_word.ToForwardingAddress());
2671     if (access_mode == AccessMode::NON_ATOMIC) {
2672       slot.store(target);
2673     } else {
2674       slot.Release_CompareAndSwap(old, target);
2675     }
2676     DCHECK(!Heap::InFromPage(target));
2677     DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2678   } else {
2679     DCHECK(heap_obj.map().IsMap());
2680   }
2681   // OLD_TO_OLD slots are always removed after updating.
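  // (For untyped slots, the whole OLD_TO_OLD set is released after the page
  // has been processed; see RememberedSetUpdatingItem::UpdateUntypedPointers()
  // further down.)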
2682   return REMOVE_SLOT;
2683 }
2684 
2685 template <AccessMode access_mode, typename TSlot>
2686 static inline SlotCallbackResult UpdateSlot(IsolateRoot isolate, TSlot slot) {
2687   typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
2688   HeapObject heap_obj;
2689   if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
2690     UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(slot, obj, heap_obj);
2691   } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
2692     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2693                                                                     heap_obj);
2694   }
2695   return REMOVE_SLOT;
2696 }
2697 
2698 template <AccessMode access_mode, typename TSlot>
2699 static inline SlotCallbackResult UpdateStrongSlot(IsolateRoot isolate,
2700                                                   TSlot slot) {
2701   typename TSlot::TObject obj = slot.Relaxed_Load(isolate);
2702   DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
2703   HeapObject heap_obj;
2704   if (obj.GetHeapObject(&heap_obj)) {
2705     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(slot, obj,
2706                                                                     heap_obj);
2707   }
2708   return REMOVE_SLOT;
2709 }
2710 
2711 }  // namespace
2712 
2713 // Visitor for updating root pointers and to-space pointers.
2714 // It does not expect to encounter pointers to dead objects.
2715 class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
2716  public:
2717   explicit PointersUpdatingVisitor(IsolateRoot isolate) : isolate_(isolate) {}
2718 
2719   void VisitPointer(HeapObject host, ObjectSlot p) override {
2720     UpdateStrongSlotInternal(isolate_, p);
2721   }
2722 
2723   void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
2724     UpdateSlotInternal(isolate_, p);
2725   }
2726 
2727   void VisitPointers(HeapObject host, ObjectSlot start,
2728                      ObjectSlot end) override {
2729     for (ObjectSlot p = start; p < end; ++p) {
2730       UpdateStrongSlotInternal(isolate_, p);
2731     }
2732   }
2733 
2734   void VisitPointers(HeapObject host, MaybeObjectSlot start,
2735                      MaybeObjectSlot end) final {
2736     for (MaybeObjectSlot p = start; p < end; ++p) {
2737       UpdateSlotInternal(isolate_, p);
2738     }
2739   }
2740 
2741   void VisitRootPointer(Root root, const char* description,
2742                         FullObjectSlot p) override {
2743     UpdateRootSlotInternal(isolate_, p);
2744   }
2745 
2746   void VisitRootPointers(Root root, const char* description,
2747                          FullObjectSlot start, FullObjectSlot end) override {
2748     for (FullObjectSlot p = start; p < end; ++p) {
2749       UpdateRootSlotInternal(isolate_, p);
2750     }
2751   }
2752 
2753   void VisitRootPointers(Root root, const char* description,
2754                          OffHeapObjectSlot start,
2755                          OffHeapObjectSlot end) override {
2756     for (OffHeapObjectSlot p = start; p < end; ++p) {
2757       UpdateRootSlotInternal(isolate_, p);
2758     }
2759   }
2760 
2761   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
2762     // This visitor never visits code objects.
2763     UNREACHABLE();
2764   }
2765 
2766   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
2767     // This visitor never visits code objects.
2768     UNREACHABLE();
2769   }
2770 
2771  private:
2772   static inline SlotCallbackResult UpdateRootSlotInternal(IsolateRoot isolate,
2773                                                           FullObjectSlot slot) {
2774     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
2775   }
2776 
2777   static inline SlotCallbackResult UpdateRootSlotInternal(
2778       IsolateRoot isolate, OffHeapObjectSlot slot) {
2779     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
2780   }
2781 
2782   static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
2783       IsolateRoot isolate, MaybeObjectSlot slot) {
2784     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
2785   }
2786 
2787   static inline SlotCallbackResult UpdateStrongSlotInternal(IsolateRoot isolate,
2788                                                             ObjectSlot slot) {
2789     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
2790   }
2791 
2792   static inline SlotCallbackResult UpdateSlotInternal(IsolateRoot isolate,
2793                                                       MaybeObjectSlot slot) {
2794     return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
2795   }
2796 
2797   IsolateRoot isolate_;
2798 };
2799 
2800 static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
2801                                                         FullObjectSlot p) {
2802   MapWord map_word = HeapObject::cast(*p).map_word();
2803 
2804   if (map_word.IsForwardingAddress()) {
2805     String new_string = String::cast(map_word.ToForwardingAddress());
2806 
2807     if (new_string.IsExternalString()) {
2808       MemoryChunk::MoveExternalBackingStoreBytes(
2809           ExternalBackingStoreType::kExternalString,
2810           Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
2811           ExternalString::cast(new_string).ExternalPayloadSize());
2812     }
2813     return new_string;
2814   }
2815 
2816   return String::cast(*p);
2817 }
2818 
2819 void MarkCompactCollector::EvacuatePrologue() {
2820   // New space.
2821   NewSpace* new_space = heap()->new_space();
2822   // Append the list of new space pages to be processed.
2823   for (Page* p :
2824        PageRange(new_space->first_allocatable_address(), new_space->top())) {
2825     new_space_evacuation_pages_.push_back(p);
2826   }
2827   new_space->Flip();
2828   new_space->ResetLinearAllocationArea();
2829 
2830   DCHECK_EQ(new_space->Size(), 0);
2831 
2832   heap()->new_lo_space()->Flip();
2833   heap()->new_lo_space()->ResetPendingObject();
2834 
2835   // Old space.
2836   DCHECK(old_space_evacuation_pages_.empty());
2837   old_space_evacuation_pages_ = std::move(evacuation_candidates_);
2838   evacuation_candidates_.clear();
2839   DCHECK(evacuation_candidates_.empty());
2840 }
2841 
2842 void MarkCompactCollector::EvacuateEpilogue() {
2843   aborted_evacuation_candidates_.clear();
2844   // New space.
2845   heap()->new_space()->set_age_mark(heap()->new_space()->top());
2846   DCHECK_IMPLIES(FLAG_always_promote_young_mc,
2847                  heap()->new_space()->Size() == 0);
2848   // Deallocate unmarked large objects.
2849   heap()->lo_space()->FreeUnmarkedObjects();
2850   heap()->code_lo_space()->FreeUnmarkedObjects();
2851   heap()->new_lo_space()->FreeUnmarkedObjects();
2852   // Old space. Deallocate evacuated candidate pages.
2853   ReleaseEvacuationCandidates();
2854   // Give pages that are queued to be freed back to the OS.
2855   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2856 #ifdef DEBUG
2857   // Old-to-old slot sets must be empty after evacuation.
2858   for (Page* p : *heap()->old_space()) {
2859     DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2860     DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2861     DCHECK_NULL(p->invalidated_slots<OLD_TO_OLD>());
2862     DCHECK_NULL(p->invalidated_slots<OLD_TO_NEW>());
2863   }
2864 #endif
2865 }
2866 
2867 class Evacuator : public Malloced {
2868  public:
2869   enum EvacuationMode {
2870     kObjectsNewToOld,
2871     kPageNewToOld,
2872     kObjectsOldToOld,
2873     kPageNewToNew,
2874   };
2875 
2876   static const char* EvacuationModeName(EvacuationMode mode) {
2877     switch (mode) {
2878       case kObjectsNewToOld:
2879         return "objects-new-to-old";
2880       case kPageNewToOld:
2881         return "page-new-to-old";
2882       case kObjectsOldToOld:
2883         return "objects-old-to-old";
2884       case kPageNewToNew:
2885         return "page-new-to-new";
2886     }
2887   }
2888 
2889   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2890     // Note: The order of checks is important in this function.
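    // For example, a page tagged PAGE_NEW_NEW_PROMOTION is still part of the
    // young generation, so testing InYoungGeneration() first would misclassify
    // it as kObjectsNewToOld.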
2891     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2892       return kPageNewToOld;
2893     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2894       return kPageNewToNew;
2895     if (chunk->InYoungGeneration()) return kObjectsNewToOld;
2896     return kObjectsOldToOld;
2897   }
2898 
2899   // NewSpacePages with more live bytes than this threshold qualify for fast
2900   // evacuation.
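  // As a rough worked example (assuming the default --page_promotion_threshold
  // of 70 and roughly 256 KiB of allocatable memory per data page), pages with
  // more than ~180 KiB of live bytes qualify. With --page_promotion disabled
  // the returned value exceeds the page capacity, so no page ever qualifies.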
2901   static intptr_t NewSpacePageEvacuationThreshold() {
2902     if (FLAG_page_promotion)
2903       return FLAG_page_promotion_threshold *
2904              MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
2905     return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
2906   }
2907 
2908   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
2909             EvacuationAllocator* local_allocator, bool always_promote_young)
2910       : heap_(heap),
2911         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2912         new_space_visitor_(heap_, local_allocator, record_visitor,
2913                            &local_pretenuring_feedback_, always_promote_young),
2914         new_to_new_page_visitor_(heap_, record_visitor,
2915                                  &local_pretenuring_feedback_),
2916         new_to_old_page_visitor_(heap_, record_visitor,
2917                                  &local_pretenuring_feedback_),
2918 
2919         old_space_visitor_(heap_, local_allocator, record_visitor),
2920         local_allocator_(local_allocator),
2921         duration_(0.0),
2922         bytes_compacted_(0) {}
2923 
2924   virtual ~Evacuator() = default;
2925 
2926   void EvacuatePage(MemoryChunk* chunk);
2927 
2928   void AddObserver(MigrationObserver* observer) {
2929     new_space_visitor_.AddObserver(observer);
2930     old_space_visitor_.AddObserver(observer);
2931   }
2932 
2933   // Merge back locally cached info sequentially. Note that this method needs
2934   // to be called from the main thread.
2935   virtual void Finalize();
2936 
2937   virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
2938   virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
2939 
2940  protected:
2941   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2942 
2943   // |saved_live_bytes| returns the live bytes of the page that was processed.
2944   virtual void RawEvacuatePage(MemoryChunk* chunk,
2945                                intptr_t* saved_live_bytes) = 0;
2946 
2947   inline Heap* heap() { return heap_; }
2948 
2949   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2950     duration_ += duration;
2951     bytes_compacted_ += bytes_compacted;
2952   }
2953 
2954   Heap* heap_;
2955 
2956   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
2957 
2958   // Visitors for the corresponding spaces.
2959   EvacuateNewSpaceVisitor new_space_visitor_;
2960   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2961       new_to_new_page_visitor_;
2962   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2963       new_to_old_page_visitor_;
2964   EvacuateOldSpaceVisitor old_space_visitor_;
2965 
2966   // Locally cached collector data.
2967   EvacuationAllocator* local_allocator_;
2968 
2969   // Bookkeeping info.
2970   double duration_;
2971   intptr_t bytes_compacted_;
2972 };
2973 
2974 void Evacuator::EvacuatePage(MemoryChunk* chunk) {
2975   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
2976   DCHECK(chunk->SweepingDone());
2977   intptr_t saved_live_bytes = 0;
2978   double evacuation_time = 0.0;
2979   {
2980     AlwaysAllocateScope always_allocate(heap());
2981     TimedScope timed_scope(&evacuation_time);
2982     RawEvacuatePage(chunk, &saved_live_bytes);
2983   }
2984   ReportCompactionProgress(evacuation_time, saved_live_bytes);
2985   if (FLAG_trace_evacuation) {
2986     PrintIsolate(heap()->isolate(),
2987                  "evacuation[%p]: page=%p new_space=%d "
2988                  "page_evacuation=%d executable=%d contains_age_mark=%d "
2989                  "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
2990                  static_cast<void*>(this), static_cast<void*>(chunk),
2991                  chunk->InNewSpace(),
2992                  chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
2993                      chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
2994                  chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
2995                  chunk->Contains(heap()->new_space()->age_mark()),
2996                  saved_live_bytes, evacuation_time,
2997                  chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2998   }
2999 }
3000 
3001 void Evacuator::Finalize() {
3002   local_allocator_->Finalize();
3003   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3004   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3005                                        new_to_old_page_visitor_.moved_bytes());
3006   heap()->IncrementSemiSpaceCopiedObjectSize(
3007       new_space_visitor_.semispace_copied_size() +
3008       new_to_new_page_visitor_.moved_bytes());
3009   heap()->IncrementYoungSurvivorsCounter(
3010       new_space_visitor_.promoted_size() +
3011       new_space_visitor_.semispace_copied_size() +
3012       new_to_old_page_visitor_.moved_bytes() +
3013       new_to_new_page_visitor_.moved_bytes());
3014   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3015 }
3016 
3017 class FullEvacuator : public Evacuator {
3018  public:
3019   explicit FullEvacuator(MarkCompactCollector* collector)
3020       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
3021                   FLAG_always_promote_young_mc),
3022         record_visitor_(collector, &ephemeron_remembered_set_),
3023         local_allocator_(heap_, LocalSpaceKind::kCompactionSpaceForMarkCompact),
3024         collector_(collector) {}
3025 
3026   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
3027     return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
3028   }
3029 
3030   GCTracer::Scope::ScopeId GetTracingScope() override {
3031     return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
3032   }
3033 
3034   void Finalize() override {
3035     Evacuator::Finalize();
3036 
3037     for (auto it = ephemeron_remembered_set_.begin();
3038          it != ephemeron_remembered_set_.end(); ++it) {
3039       auto insert_result =
3040           heap()->ephemeron_remembered_set_.insert({it->first, it->second});
3041       if (!insert_result.second) {
3042         // Insertion didn't happen; merge into the existing entry by reference.
3043         auto& set = insert_result.first->second;
3044         for (int entry : it->second) {
3045           set.insert(entry);
3046         }
3047       }
3048     }
3049   }
3050 
3051  protected:
3052   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
3053   EphemeronRememberedSet ephemeron_remembered_set_;
3054   RecordMigratedSlotVisitor record_visitor_;
3055   EvacuationAllocator local_allocator_;
3056 
3057   MarkCompactCollector* collector_;
3058 };
3059 
3060 void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
3061   const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
3062   MarkCompactCollector::NonAtomicMarkingState* marking_state =
3063       collector_->non_atomic_marking_state();
3064   *live_bytes = marking_state->live_bytes(chunk);
3065   TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3066                "FullEvacuator::RawEvacuatePage", "evacuation_mode",
3067                EvacuationModeName(evacuation_mode), "live_bytes", *live_bytes);
3068   HeapObject failed_object;
3069   switch (evacuation_mode) {
3070     case kObjectsNewToOld:
3071       LiveObjectVisitor::VisitBlackObjectsNoFail(
3072           chunk, marking_state, &new_space_visitor_,
3073           LiveObjectVisitor::kClearMarkbits);
3074       break;
3075     case kPageNewToOld:
3076       LiveObjectVisitor::VisitBlackObjectsNoFail(
3077           chunk, marking_state, &new_to_old_page_visitor_,
3078           LiveObjectVisitor::kKeepMarking);
3079       new_to_old_page_visitor_.account_moved_bytes(
3080           marking_state->live_bytes(chunk));
3081       break;
3082     case kPageNewToNew:
3083       LiveObjectVisitor::VisitBlackObjectsNoFail(
3084           chunk, marking_state, &new_to_new_page_visitor_,
3085           LiveObjectVisitor::kKeepMarking);
3086       new_to_new_page_visitor_.account_moved_bytes(
3087           marking_state->live_bytes(chunk));
3088       break;
3089     case kObjectsOldToOld: {
3090       const bool success = LiveObjectVisitor::VisitBlackObjects(
3091           chunk, marking_state, &old_space_visitor_,
3092           LiveObjectVisitor::kClearMarkbits, &failed_object);
3093       if (!success) {
3094         // Aborted compaction page. Actual processing happens on the main
3095       // thread for simplicity.
3096         collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
3097       }
3098       break;
3099     }
3100   }
3101 }
3102 
3103 class PageEvacuationJob : public v8::JobTask {
3104  public:
3105   PageEvacuationJob(
3106       Isolate* isolate, std::vector<std::unique_ptr<Evacuator>>* evacuators,
3107       std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items)
3108       : evacuators_(evacuators),
3109         evacuation_items_(std::move(evacuation_items)),
3110         remaining_evacuation_items_(evacuation_items_.size()),
3111         generator_(evacuation_items_.size()),
3112         tracer_(isolate->heap()->tracer()) {}
3113 
3114   void Run(JobDelegate* delegate) override {
3115     Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
3116     if (delegate->IsJoiningThread()) {
3117       TRACE_GC(tracer_, evacuator->GetTracingScope());
3118       ProcessItems(delegate, evacuator);
3119     } else {
3120       TRACE_BACKGROUND_GC(tracer_, evacuator->GetBackgroundTracingScope());
3121       ProcessItems(delegate, evacuator);
3122     }
3123   }
3124 
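  // Work-distribution sketch: each worker asks the IndexGenerator for a fresh
  // starting index and then scans forward, claiming items via
  // ParallelWorkItem::TryAcquire(); the shared counter of remaining items lets
  // workers return early once everything has been claimed.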
3125   void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
3126     while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
3127       base::Optional<size_t> index = generator_.GetNext();
3128       if (!index) return;
3129       for (size_t i = *index; i < evacuation_items_.size(); ++i) {
3130         auto& work_item = evacuation_items_[i];
3131         if (!work_item.first.TryAcquire()) break;
3132         evacuator->EvacuatePage(work_item.second);
3133         if (remaining_evacuation_items_.fetch_sub(
3134                 1, std::memory_order_relaxed) <= 1) {
3135           return;
3136         }
3137       }
3138     }
3139   }
3140 
3141   size_t GetMaxConcurrency(size_t worker_count) const override {
3142     const size_t kItemsPerWorker = MB / Page::kPageSize;
3143     // Ceiling division to ensure enough workers for all
3144     // |remaining_evacuation_items_|
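    // For example, assuming 256 KiB pages, kItemsPerWorker is 4, so 10
    // remaining items request (10 + 3) / 4 = 3 workers, further capped by the
    // number of evacuators below.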
3145     const size_t wanted_num_workers =
3146         (remaining_evacuation_items_.load(std::memory_order_relaxed) +
3147          kItemsPerWorker - 1) /
3148         kItemsPerWorker;
3149     return std::min<size_t>(wanted_num_workers, evacuators_->size());
3150   }
3151 
3152  private:
3153   std::vector<std::unique_ptr<Evacuator>>* evacuators_;
3154   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items_;
3155   std::atomic<size_t> remaining_evacuation_items_{0};
3156   IndexGenerator generator_;
3157 
3158   GCTracer* tracer_;
3159 };
3160 
3161 template <class Evacuator, class Collector>
3162 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
3163     Collector* collector,
3164     std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
3165     MigrationObserver* migration_observer, const intptr_t live_bytes) {
3166   // Used for trace summary.
3167   double compaction_speed = 0;
3168   if (FLAG_trace_evacuation) {
3169     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3170   }
3171 
3172   const bool profiling = isolate()->LogObjectRelocation();
3173   ProfilingMigrationObserver profiling_observer(heap());
3174 
3175   const size_t pages_count = evacuation_items.size();
3176   std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
3177   const int wanted_num_tasks = NumberOfParallelCompactionTasks();
3178   for (int i = 0; i < wanted_num_tasks; i++) {
3179     auto evacuator = std::make_unique<Evacuator>(collector);
3180     if (profiling) evacuator->AddObserver(&profiling_observer);
3181     if (migration_observer != nullptr)
3182       evacuator->AddObserver(migration_observer);
3183     evacuators.push_back(std::move(evacuator));
3184   }
3185   V8::GetCurrentPlatform()
3186       ->PostJob(v8::TaskPriority::kUserBlocking,
3187                 std::make_unique<PageEvacuationJob>(
3188                     isolate(), &evacuators, std::move(evacuation_items)))
3189       ->Join();
3190 
3191   for (auto& evacuator : evacuators) evacuator->Finalize();
3192   evacuators.clear();
3193 
3194   if (FLAG_trace_evacuation) {
3195     PrintIsolate(isolate(),
3196                  "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
3197                  "wanted_tasks=%d cores=%d live_bytes=%" V8PRIdPTR
3198                  " compaction_speed=%.f\n",
3199                  isolate()->time_millis_since_init(),
3200                  FLAG_parallel_compaction ? "yes" : "no", pages_count,
3201                  wanted_num_tasks,
3202                  V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
3203                  live_bytes, compaction_speed);
3204   }
3205 }
3206 
3207 bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes,
3208                                               bool always_promote_young) {
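  // A page is promoted wholesale only when the heap is not trying to shrink,
  // the page is movable, its live bytes exceed the evacuation threshold, it
  // does not straddle the age mark (unless young objects are always promoted),
  // and the old generation can still grow by that amount.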
3209   const bool reduce_memory = heap()->ShouldReduceMemory();
3210   const Address age_mark = heap()->new_space()->age_mark();
3211   return !reduce_memory && !p->NeverEvacuate() &&
3212          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
3213          (always_promote_young || !p->Contains(age_mark)) &&
3214          heap()->CanExpandOldGeneration(live_bytes);
3215 }
3216 
3217 void MarkCompactCollector::EvacuatePagesInParallel() {
3218   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
3219   intptr_t live_bytes = 0;
3220 
3221   // Evacuation of new space pages cannot be aborted, so it needs to run
3222   // before old space evacuation.
3223   for (Page* page : new_space_evacuation_pages_) {
3224     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
3225     if (live_bytes_on_page == 0) continue;
3226     live_bytes += live_bytes_on_page;
3227     if (ShouldMovePage(page, live_bytes_on_page,
3228                        FLAG_always_promote_young_mc)) {
3229       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) ||
3230           FLAG_always_promote_young_mc) {
3231         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3232         DCHECK_EQ(heap()->old_space(), page->owner());
3233         // The move added page->allocated_bytes to the old space, but we are
3234         // going to sweep the page and add page->live_byte_count.
3235         heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
3236                                                     page);
3237       } else {
3238         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3239       }
3240     }
3241     evacuation_items.emplace_back(ParallelWorkItem{}, page);
3242   }
3243 
3244   for (Page* page : old_space_evacuation_pages_) {
3245     live_bytes += non_atomic_marking_state()->live_bytes(page);
3246     evacuation_items.emplace_back(ParallelWorkItem{}, page);
3247   }
3248 
3249   // Promote young generation large objects.
3250   IncrementalMarking::NonAtomicMarkingState* marking_state =
3251       heap()->incremental_marking()->non_atomic_marking_state();
3252 
3253   for (auto it = heap()->new_lo_space()->begin();
3254        it != heap()->new_lo_space()->end();) {
3255     LargePage* current = *it;
3256     it++;
3257     HeapObject object = current->GetObject();
3258     DCHECK(!marking_state->IsGrey(object));
3259     if (marking_state->IsBlack(object)) {
3260       heap_->lo_space()->PromoteNewLargeObject(current);
3261       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
3262       evacuation_items.emplace_back(ParallelWorkItem{}, current);
3263     }
3264   }
3265 
3266   if (evacuation_items.empty()) return;
3267 
3268   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3269                "MarkCompactCollector::EvacuatePagesInParallel", "pages",
3270                evacuation_items.size());
3271 
3272   CreateAndExecuteEvacuationTasks<FullEvacuator>(
3273       this, std::move(evacuation_items), nullptr, live_bytes);
3274 
3275   // After evacuation there might still be swept pages that weren't
3276   // added to one of the compaction spaces but still reside in the
3277   // sweeper's swept_list_. Merge remembered sets for those pages as
3278   // well such that after mark-compact all pages either store slots
3279   // in the sweeping or old-to-new remembered set.
3280   sweeper()->MergeOldToNewRememberedSetsForSweptPages();
3281 
3282   PostProcessEvacuationCandidates();
3283 }
3284 
3285 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3286  public:
3287   Object RetainAs(Object object) override {
3288     if (object.IsHeapObject()) {
3289       HeapObject heap_object = HeapObject::cast(object);
3290       MapWord map_word = heap_object.map_word();
3291       if (map_word.IsForwardingAddress()) {
3292         return map_word.ToForwardingAddress();
3293       }
3294     }
3295     return object;
3296   }
3297 };
3298 
3299 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
3300   EvacuateRecordOnlyVisitor visitor(heap());
3301   LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3302                                              &visitor,
3303                                              LiveObjectVisitor::kKeepMarking);
3304 }
3305 
3306 template <class Visitor, typename MarkingState>
3307 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
3308                                           MarkingState* marking_state,
3309                                           Visitor* visitor,
3310                                           IterationMode iteration_mode,
3311                                           HeapObject* failed_object) {
3312   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3313                "LiveObjectVisitor::VisitBlackObjects");
3314   for (auto object_and_size :
3315        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
3316     HeapObject const object = object_and_size.first;
3317     if (!visitor->Visit(object, object_and_size.second)) {
3318       if (iteration_mode == kClearMarkbits) {
3319         marking_state->bitmap(chunk)->ClearRange(
3320             chunk->AddressToMarkbitIndex(chunk->area_start()),
3321             chunk->AddressToMarkbitIndex(object.address()));
3322         *failed_object = object;
3323       }
3324       return false;
3325     }
3326   }
3327   if (iteration_mode == kClearMarkbits) {
3328     marking_state->ClearLiveness(chunk);
3329   }
3330   return true;
3331 }
3332 
3333 template <class Visitor, typename MarkingState>
3334 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
3335                                                 MarkingState* marking_state,
3336                                                 Visitor* visitor,
3337                                                 IterationMode iteration_mode) {
3338   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3339                "LiveObjectVisitor::VisitBlackObjectsNoFail");
3340   if (chunk->IsLargePage()) {
3341     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
3342     if (marking_state->IsBlack(object)) {
3343       const bool success = visitor->Visit(object, object.Size());
3344       USE(success);
3345       DCHECK(success);
3346     }
3347   } else {
3348     for (auto object_and_size :
3349          LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
3350       HeapObject const object = object_and_size.first;
3351       DCHECK(marking_state->IsBlack(object));
3352       const bool success = visitor->Visit(object, object_and_size.second);
3353       USE(success);
3354       DCHECK(success);
3355     }
3356   }
3357   if (iteration_mode == kClearMarkbits) {
3358     marking_state->ClearLiveness(chunk);
3359   }
3360 }
3361 
3362 template <class Visitor, typename MarkingState>
3363 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
3364                                                MarkingState* marking_state,
3365                                                Visitor* visitor,
3366                                                IterationMode iteration_mode) {
3367   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3368                "LiveObjectVisitor::VisitGreyObjectsNoFail");
3369   if (chunk->IsLargePage()) {
3370     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
3371     if (marking_state->IsGrey(object)) {
3372       const bool success = visitor->Visit(object, object.Size());
3373       USE(success);
3374       DCHECK(success);
3375     }
3376   } else {
3377     for (auto object_and_size :
3378          LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
3379       HeapObject const object = object_and_size.first;
3380       DCHECK(marking_state->IsGrey(object));
3381       const bool success = visitor->Visit(object, object_and_size.second);
3382       USE(success);
3383       DCHECK(success);
3384     }
3385   }
3386   if (iteration_mode == kClearMarkbits) {
3387     marking_state->ClearLiveness(chunk);
3388   }
3389 }
3390 
3391 template <typename MarkingState>
3392 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
3393                                            MarkingState* marking_state) {
3394   int new_live_size = 0;
3395   for (auto object_and_size :
3396        LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
3397     new_live_size += object_and_size.second;
3398   }
3399   marking_state->SetLiveBytes(chunk, new_live_size);
3400 }
3401 
3402 void MarkCompactCollector::Evacuate() {
3403   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3404   base::MutexGuard guard(heap()->relocation_mutex());
3405 
3406   {
3407     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
3408     EvacuatePrologue();
3409   }
3410 
3411   {
3412     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3413     EvacuationScope evacuation_scope(this);
3414     EvacuatePagesInParallel();
3415   }
3416 
3417   UpdatePointersAfterEvacuation();
3418 
3419   {
3420     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
3421     if (!heap()->new_space()->Rebalance()) {
3422       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
3423     }
3424   }
3425 
3426   // Give pages that are queued to be freed back to the OS. Note that filtering
3427   // slots only handles old space (for unboxed doubles), and thus map space can
3428   // still contain stale pointers. We only free the chunks after pointer updates
3429   // to still have access to page headers.
3430   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3431 
3432   {
3433     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3434 
3435     for (Page* p : new_space_evacuation_pages_) {
3436       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3437         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3438         sweeper()->AddPageForIterability(p);
3439       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3440         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3441         DCHECK_EQ(OLD_SPACE, p->owner_identity());
3442         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
3443       }
3444     }
3445     new_space_evacuation_pages_.clear();
3446 
3447     for (Page* p : old_space_evacuation_pages_) {
3448       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3449         sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
3450         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3451       }
3452     }
3453   }
3454 
3455   {
3456     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
3457     EvacuateEpilogue();
3458   }
3459 
3460 #ifdef VERIFY_HEAP
3461   if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
3462     FullEvacuationVerifier verifier(heap());
3463     verifier.Run();
3464   }
3465 #endif
3466 }
3467 
3468 class UpdatingItem : public ParallelWorkItem {
3469  public:
3470   virtual ~UpdatingItem() = default;
3471   virtual void Process() = 0;
3472 };
3473 
3474 class PointersUpdatingJob : public v8::JobTask {
3475  public:
3476   explicit PointersUpdatingJob(
3477       Isolate* isolate,
3478       std::vector<std::unique_ptr<UpdatingItem>> updating_items, int slots,
3479       GCTracer::Scope::ScopeId scope,
3480       GCTracer::BackgroundScope::ScopeId background_scope)
3481       : updating_items_(std::move(updating_items)),
3482         remaining_updating_items_(updating_items_.size()),
3483         generator_(updating_items_.size()),
3484         slots_(slots),
3485         tracer_(isolate->heap()->tracer()),
3486         scope_(scope),
3487         background_scope_(background_scope) {}
3488 
3489   void Run(JobDelegate* delegate) override {
3490     if (delegate->IsJoiningThread()) {
3491       TRACE_GC(tracer_, scope_);
3492       UpdatePointers(delegate);
3493     } else {
3494       TRACE_BACKGROUND_GC(tracer_, background_scope_);
3495       UpdatePointers(delegate);
3496     }
3497   }
3498 
3499   void UpdatePointers(JobDelegate* delegate) {
3500     while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
3501       base::Optional<size_t> index = generator_.GetNext();
3502       if (!index) return;
3503       for (size_t i = *index; i < updating_items_.size(); ++i) {
3504         auto& work_item = updating_items_[i];
3505         if (!work_item->TryAcquire()) break;
3506         work_item->Process();
3507         if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
3508             1) {
3509           return;
3510         }
3511       }
3512     }
3513   }
3514 
3515   size_t GetMaxConcurrency(size_t worker_count) const override {
3516     size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
3517     if (!FLAG_parallel_pointer_update) return items > 0;
3518     const size_t kMaxPointerUpdateTasks = 8;
3519     const size_t kSlotsPerTask = 600;
3520     size_t wanted_tasks = items;
3521     // Limit the number of update tasks as task creation often dominates the
3522     // actual work that is being done.
3523     if (slots_ >= 0) {
3524       // Round up to ensure enough workers for all items.
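      // For example, 2000 recorded slots give (2000 + 599) / 600 = 4 wanted
      // tasks, which is then capped at kMaxPointerUpdateTasks and at the
      // number of remaining items.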
3525       wanted_tasks =
3526           std::min<size_t>(items, (slots_ + kSlotsPerTask - 1) / kSlotsPerTask);
3527     }
3528     return std::min<size_t>(kMaxPointerUpdateTasks, wanted_tasks);
3529   }
3530 
3531  private:
3532   std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
3533   std::atomic<size_t> remaining_updating_items_{0};
3534   IndexGenerator generator_;
3535   const int slots_;
3536 
3537   GCTracer* tracer_;
3538   GCTracer::Scope::ScopeId scope_;
3539   GCTracer::BackgroundScope::ScopeId background_scope_;
3540 };
3541 
3542 template <typename MarkingState>
3543 class ToSpaceUpdatingItem : public UpdatingItem {
3544  public:
3545   explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
3546                                MarkingState* marking_state)
3547       : chunk_(chunk),
3548         start_(start),
3549         end_(end),
3550         marking_state_(marking_state) {}
3551   ~ToSpaceUpdatingItem() override = default;
3552 
3553   void Process() override {
3554     if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3555       // New->new promoted pages contain garbage so they require iteration using
3556       // markbits.
3557       ProcessVisitLive();
3558     } else {
3559       ProcessVisitAll();
3560     }
3561   }
3562 
3563  private:
3564   void ProcessVisitAll() {
3565     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3566                  "ToSpaceUpdatingItem::ProcessVisitAll");
3567     PointersUpdatingVisitor visitor(
3568         GetIsolateForPtrComprFromOnHeapAddress(start_));
3569     for (Address cur = start_; cur < end_;) {
3570       HeapObject object = HeapObject::FromAddress(cur);
3571       Map map = object.map();
3572       int size = object.SizeFromMap(map);
3573       object.IterateBodyFast(map, size, &visitor);
3574       cur += size;
3575     }
3576   }
3577 
3578   void ProcessVisitLive() {
3579     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3580                  "ToSpaceUpdatingItem::ProcessVisitLive");
3581     // For young generation evacuations we want to visit grey objects; for
3582     // full MC, we need to visit black objects.
3583     PointersUpdatingVisitor visitor(
3584         GetIsolateForPtrComprFromOnHeapAddress(start_));
3585     for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
3586              chunk_, marking_state_->bitmap(chunk_))) {
3587       object_and_size.first.IterateBodyFast(&visitor);
3588     }
3589   }
3590 
3591   MemoryChunk* chunk_;
3592   Address start_;
3593   Address end_;
3594   MarkingState* marking_state_;
3595 };
3596 
3597 template <typename MarkingState, GarbageCollector collector>
3598 class RememberedSetUpdatingItem : public UpdatingItem {
3599  public:
3600   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
3601                                      MemoryChunk* chunk,
3602                                      RememberedSetUpdatingMode updating_mode)
3603       : heap_(heap),
3604         marking_state_(marking_state),
3605         chunk_(chunk),
3606         updating_mode_(updating_mode) {}
3607   ~RememberedSetUpdatingItem() override = default;
3608 
3609   void Process() override {
3610     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3611                  "RememberedSetUpdatingItem::Process");
3612     base::MutexGuard guard(chunk_->mutex());
3613     CodePageMemoryModificationScope memory_modification_scope(chunk_);
3614     UpdateUntypedPointers();
3615     UpdateTypedPointers();
3616   }
3617 
3618  private:
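  // Updates a single old-to-new slot: follows the forwarding pointer if the
  // referenced object was evacuated and answers KEEP_SLOT only while the
  // object is still in the young generation (or sits on a new-to-new promoted
  // page whose mark bits say it is live); otherwise REMOVE_SLOT.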
3619   template <typename TSlot>
3620   inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
3621     static_assert(
3622         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
3623             std::is_same<TSlot, MaybeObjectSlot>::value,
3624         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
3625     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
3626     HeapObject heap_object;
3627     if (!(*slot).GetHeapObject(&heap_object)) {
3628       return REMOVE_SLOT;
3629     }
3630     if (Heap::InFromPage(heap_object)) {
3631       MapWord map_word = heap_object.map_word();
3632       if (map_word.IsForwardingAddress()) {
3633         HeapObjectReference::Update(THeapObjectSlot(slot),
3634                                     map_word.ToForwardingAddress());
3635       }
3636       bool success = (*slot).GetHeapObject(&heap_object);
3637       USE(success);
3638       DCHECK(success);
3639       // If the object was in from-space before the callback ran and is in
3640       // to-space afterwards, the object is still live.
3641       // Unfortunately, we do not know about the slot. It could be in a
3642       // just freed free space object.
3643       if (Heap::InToPage(heap_object)) {
3644         return KEEP_SLOT;
3645       }
3646     } else if (Heap::InToPage(heap_object)) {
3647       // Slots can point to "to" space if the page has been moved, or if the
3648       // slot has been recorded multiple times in the remembered set, or
3649       // if the slot was already updated during old->old updating.
3650       // In case the page has been moved, check markbits to determine liveness
3651       // of the slot. In the other case, the slot can just be kept.
3652       if (Page::FromHeapObject(heap_object)
3653               ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3654         // IsBlackOrGrey is required because objects are marked as grey for
3655         // the young generation collector while they are black for the full
3656         // MC.
3657         if (marking_state_->IsBlackOrGrey(heap_object)) {
3658           return KEEP_SLOT;
3659         } else {
3660           return REMOVE_SLOT;
3661         }
3662       }
3663       return KEEP_SLOT;
3664     } else {
3665       DCHECK(!Heap::InYoungGeneration(heap_object));
3666     }
3667     return REMOVE_SLOT;
3668   }
3669 
3670   void UpdateUntypedPointers() {
3671     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
3672       DCHECK_IMPLIES(
3673           collector == MARK_COMPACTOR,
3674           chunk_->SweepingDone() &&
3675               chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>() == nullptr);
3676 
3677       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
3678       int slots = RememberedSet<OLD_TO_NEW>::Iterate(
3679           chunk_,
3680           [this, &filter](MaybeObjectSlot slot) {
3681             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3682             return CheckAndUpdateOldToNewSlot(slot);
3683           },
3684           SlotSet::FREE_EMPTY_BUCKETS);
3685 
3686       DCHECK_IMPLIES(
3687           collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
3688           slots == 0);
3689 
3690       if (slots == 0) {
3691         chunk_->ReleaseSlotSet<OLD_TO_NEW>();
3692       }
3693     }
3694 
3695     if (chunk_->sweeping_slot_set<AccessMode::NON_ATOMIC>()) {
3696       DCHECK_IMPLIES(
3697           collector == MARK_COMPACTOR,
3698           !chunk_->SweepingDone() &&
3699               (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>()) ==
3700                   nullptr);
3701       DCHECK(!chunk_->IsLargePage());
3702 
3703       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
3704       int slots = RememberedSetSweeping::Iterate(
3705           chunk_,
3706           [this, &filter](MaybeObjectSlot slot) {
3707             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3708             return CheckAndUpdateOldToNewSlot(slot);
3709           },
3710           SlotSet::FREE_EMPTY_BUCKETS);
3711 
3712       DCHECK_IMPLIES(
3713           collector == MARK_COMPACTOR && FLAG_always_promote_young_mc,
3714           slots == 0);
3715 
3716       if (slots == 0) {
3717         chunk_->ReleaseSweepingSlotSet();
3718       }
3719     }
3720 
3721     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
3722       // The invalidated slots are not needed after old-to-new slots were
3723       // processed.
3724       chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
3725     }
3726 
3727     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3728         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
3729       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
3730       IsolateRoot isolate = heap_->isolate();
3731       RememberedSet<OLD_TO_OLD>::Iterate(
3732           chunk_,
3733           [&filter, isolate](MaybeObjectSlot slot) {
3734             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3735             return UpdateSlot<AccessMode::NON_ATOMIC>(isolate, slot);
3736           },
3737           SlotSet::FREE_EMPTY_BUCKETS);
3738       chunk_->ReleaseSlotSet<OLD_TO_OLD>();
3739     }
3740     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3741         chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
3742       // The invalidated slots are not needed after old-to-old slots were
3743       // processed.
3744       chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
3745     }
3746   }
3747 
3748   void UpdateTypedPointers() {
3749     if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
3750         nullptr) {
3751       CHECK_NE(chunk_->owner(), heap_->map_space());
3752       const auto check_and_update_old_to_new_slot_fn =
3753           [this](FullMaybeObjectSlot slot) {
3754             return CheckAndUpdateOldToNewSlot(slot);
3755           };
3756       RememberedSet<OLD_TO_NEW>::IterateTyped(
3757           chunk_, [=](SlotType slot_type, Address slot) {
3758             return UpdateTypedSlotHelper::UpdateTypedSlot(
3759                 heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
3760           });
3761     }
3762     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3763         (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
3764          nullptr)) {
3765       CHECK_NE(chunk_->owner(), heap_->map_space());
3766       RememberedSet<OLD_TO_OLD>::IterateTyped(chunk_, [=](SlotType slot_type,
3767                                                           Address slot) {
3768         // Using UpdateStrongSlot is OK here, because there are no weak
3769         // typed slots.
3770         IsolateRoot isolate = heap_->isolate();
3771         return UpdateTypedSlotHelper::UpdateTypedSlot(
3772             heap_, slot_type, slot, [isolate](FullMaybeObjectSlot slot) {
3773               return UpdateStrongSlot<AccessMode::NON_ATOMIC>(isolate, slot);
3774             });
3775       });
3776     }
3777   }
3778 
3779   Heap* heap_;
3780   MarkingState* marking_state_;
3781   MemoryChunk* chunk_;
3782   RememberedSetUpdatingMode updating_mode_;
3783 };
3784 
3785 std::unique_ptr<UpdatingItem> MarkCompactCollector::CreateToSpaceUpdatingItem(
3786     MemoryChunk* chunk, Address start, Address end) {
3787   return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
3788       chunk, start, end, non_atomic_marking_state());
3789 }
3790 
3791 std::unique_ptr<UpdatingItem>
3792 MarkCompactCollector::CreateRememberedSetUpdatingItem(
3793     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
3794   return std::make_unique<
3795       RememberedSetUpdatingItem<NonAtomicMarkingState, MARK_COMPACTOR>>(
3796       heap(), non_atomic_marking_state(), chunk, updating_mode);
3797 }
3798 
3799 int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
3800     std::vector<std::unique_ptr<UpdatingItem>>* items) {
3801   // Seed to space pages.
3802   const Address space_start = heap()->new_space()->first_allocatable_address();
3803   const Address space_end = heap()->new_space()->top();
3804   int pages = 0;
3805   for (Page* page : PageRange(space_start, space_end)) {
3806     Address start =
3807         page->Contains(space_start) ? space_start : page->area_start();
3808     Address end = page->Contains(space_end) ? space_end : page->area_end();
3809     items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
3810     pages++;
3811   }
3812   return pages;
3813 }
3814 
3815 template <typename IterateableSpace>
3816 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
3817     std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
3818     RememberedSetUpdatingMode mode) {
3819   int pages = 0;
3820   for (MemoryChunk* chunk : *space) {
3821     const bool contains_old_to_old_slots =
3822         chunk->slot_set<OLD_TO_OLD>() != nullptr ||
3823         chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
3824     const bool contains_old_to_new_slots =
3825         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
3826         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
3827     const bool contains_old_to_new_sweeping_slots =
3828         chunk->sweeping_slot_set() != nullptr;
3829     const bool contains_old_to_old_invalidated_slots =
3830         chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
3831     const bool contains_old_to_new_invalidated_slots =
3832         chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
3833     if (!contains_old_to_new_slots && !contains_old_to_new_sweeping_slots &&
3834         !contains_old_to_old_slots && !contains_old_to_old_invalidated_slots &&
3835         !contains_old_to_new_invalidated_slots)
3836       continue;
3837     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
3838         contains_old_to_new_sweeping_slots ||
3839         contains_old_to_old_invalidated_slots ||
3840         contains_old_to_new_invalidated_slots) {
3841       items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
3842       pages++;
3843     }
3844   }
3845   return pages;
3846 }
3847 
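// Updating item that fixes up the ephemeron remembered set after evacuation:
// entries for tables that were moved are dropped (the migration visitor
// re-records them), forwarded keys are written back into their slots, and
// indices whose keys left the young generation are removed.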
3848 class EphemeronTableUpdatingItem : public UpdatingItem {
3849  public:
3850   enum EvacuationState { kRegular, kAborted };
3851 
3852   explicit EphemeronTableUpdatingItem(Heap* heap) : heap_(heap) {}
3853   ~EphemeronTableUpdatingItem() override = default;
3854 
3855   void Process() override {
3856     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3857                  "EphemeronTableUpdatingItem::Process");
3858 
3859     for (auto it = heap_->ephemeron_remembered_set_.begin();
3860          it != heap_->ephemeron_remembered_set_.end();) {
3861       EphemeronHashTable table = it->first;
3862       auto& indices = it->second;
3863       if (table.map_word().IsForwardingAddress()) {
3864         // The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
3865         // inserts entries for the moved table into ephemeron_remembered_set_.
3866         it = heap_->ephemeron_remembered_set_.erase(it);
3867         continue;
3868       }
3869       DCHECK(table.map().IsMap());
3870       DCHECK(table.Object::IsEphemeronHashTable());
3871       for (auto iti = indices.begin(); iti != indices.end();) {
3872         // EphemeronHashTable keys must be heap objects.
3873         HeapObjectSlot key_slot(table.RawFieldOfElementAt(
3874             EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
3875         HeapObject key = key_slot.ToHeapObject();
3876         MapWord map_word = key.map_word();
3877         if (map_word.IsForwardingAddress()) {
3878           key = map_word.ToForwardingAddress();
3879           key_slot.StoreHeapObject(key);
3880         }
3881         if (!heap_->InYoungGeneration(key)) {
3882           iti = indices.erase(iti);
3883         } else {
3884           ++iti;
3885         }
3886       }
3887       if (indices.size() == 0) {
3888         it = heap_->ephemeron_remembered_set_.erase(it);
3889       } else {
3890         ++it;
3891       }
3892     }
3893   }
3894 
3895  private:
3896   Heap* const heap_;
3897 };
3898 
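// Pointer updating proceeds in phases: roots first, then remembered-set and
// to-space items for the old, code, lo and code-lo spaces plus the ephemeron
// tables in a parallel job, then the map space separately (see the comment
// below), and finally the external string table and weak lists.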
3899 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3900   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3901 
3902   PointersUpdatingVisitor updating_visitor(isolate());
3903 
3904   {
3905     TRACE_GC(heap()->tracer(),
3906              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3907     // The external string table is updated at the end.
3908     heap_->IterateRoots(&updating_visitor, base::EnumSet<SkipRoot>{
3909                                                SkipRoot::kExternalStringTable});
3910   }
3911 
3912   {
3913     TRACE_GC(heap()->tracer(),
3914              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
3915     std::vector<std::unique_ptr<UpdatingItem>> updating_items;
3916 
3917     CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
3918                                       RememberedSetUpdatingMode::ALL);
3919     CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
3920                                       RememberedSetUpdatingMode::ALL);
3921     CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
3922                                       RememberedSetUpdatingMode::ALL);
3923     CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
3924                                       RememberedSetUpdatingMode::ALL);
3925 
3926     CollectToSpaceUpdatingItems(&updating_items);
3927     updating_items.push_back(
3928         std::make_unique<EphemeronTableUpdatingItem>(heap()));
3929 
3930     V8::GetCurrentPlatform()
3931         ->PostJob(v8::TaskPriority::kUserBlocking,
3932                   std::make_unique<PointersUpdatingJob>(
3933                       isolate(), std::move(updating_items), old_to_new_slots_,
3934                       GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
3935                       GCTracer::BackgroundScope::
3936                           MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
3937         ->Join();
3938   }
3939 
3940   {
3941     // - Update pointers in map space in a separate phase to avoid data races
3942     //   with Map->LayoutDescriptor edge.
3943     // - Update array buffer trackers in the second phase to have access to
3944     //   byte length which is potentially a HeapNumber.
3945     TRACE_GC(heap()->tracer(),
3946              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
3947     std::vector<std::unique_ptr<UpdatingItem>> updating_items;
3948 
3949     CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
3950                                       RememberedSetUpdatingMode::ALL);
3951     if (!updating_items.empty()) {
3952       V8::GetCurrentPlatform()
3953           ->PostJob(v8::TaskPriority::kUserBlocking,
3954                     std::make_unique<PointersUpdatingJob>(
3955                         isolate(), std::move(updating_items), old_to_new_slots_,
3956                         GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
3957                         GCTracer::BackgroundScope::
3958                             MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
3959           ->Join();
3960     }
3961   }
3962 
3963   {
3964     TRACE_GC(heap()->tracer(),
3965              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3966     // Update pointers from external string table.
3967     heap_->UpdateReferencesInExternalStringTable(
3968         &UpdateReferenceInExternalStringTableEntry);
3969 
3970     EvacuationWeakObjectRetainer evacuation_object_retainer;
3971     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3972   }
3973 }
3974 
3975 void MarkCompactCollector::ReportAbortedEvacuationCandidate(
3976     HeapObject failed_object, MemoryChunk* chunk) {
3977   base::MutexGuard guard(&mutex_);
3978 
3979   aborted_evacuation_candidates_.push_back(
3980       std::make_pair(failed_object, static_cast<Page*>(chunk)));
3981 }
3982 
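// Handles pages whose evacuation was aborted: stale recorded slots up to the
// failed object are removed, live bytes are recomputed, and slots for the
// surviving objects are re-recorded. Successfully evacuated candidates are
// unlinked from their owner's page list instead.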
3983 void MarkCompactCollector::PostProcessEvacuationCandidates() {
3984   for (auto object_and_page : aborted_evacuation_candidates_) {
3985     HeapObject failed_object = object_and_page.first;
3986     Page* page = object_and_page.second;
3987     page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3988     // Aborted compaction page. We have to record slots here, since we
3989     // might not have recorded them in the first place.
3990 
3991     // Remove outdated slots.
3992     RememberedSetSweeping::RemoveRange(page, page->address(),
3993                                        failed_object.address(),
3994                                        SlotSet::FREE_EMPTY_BUCKETS);
3995     RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
3996                                            failed_object.address(),
3997                                            SlotSet::FREE_EMPTY_BUCKETS);
3998     RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3999                                                 failed_object.address());
4000 
4001     // Remove invalidated slots.
4002     if (failed_object.address() > page->area_start()) {
4003       InvalidatedSlotsCleanup old_to_new_cleanup =
4004           InvalidatedSlotsCleanup::OldToNew(page);
4005       old_to_new_cleanup.Free(page->area_start(), failed_object.address());
4006     }
4007 
4008     // Recompute live bytes.
4009     LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
4010     // Re-record slots.
4011     EvacuateRecordOnlyVisitor record_visitor(heap());
4012     LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
4013                                                &record_visitor,
4014                                                LiveObjectVisitor::kKeepMarking);
4015     // Array buffers will be processed during pointer updating.
4016   }
4017   const int aborted_pages =
4018       static_cast<int>(aborted_evacuation_candidates_.size());
4019   int aborted_pages_verified = 0;
4020   for (Page* p : old_space_evacuation_pages_) {
4021     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
4022       // After clearing the evacuation candidate flag the page is again in a
4023       // regular state.
4024       p->ClearEvacuationCandidate();
4025       aborted_pages_verified++;
4026     } else {
4027       DCHECK(p->IsEvacuationCandidate());
4028       DCHECK(p->SweepingDone());
4029       p->owner()->memory_chunk_list().Remove(p);
4030     }
4031   }
4032   DCHECK_EQ(aborted_pages_verified, aborted_pages);
4033   if (FLAG_trace_evacuation && (aborted_pages > 0)) {
4034     PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
4035                  isolate()->time_millis_since_init(), aborted_pages);
4036   }
4037 }
4038 
4039 void MarkCompactCollector::ReleaseEvacuationCandidates() {
4040   for (Page* p : old_space_evacuation_pages_) {
4041     if (!p->IsEvacuationCandidate()) continue;
4042     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
4043     non_atomic_marking_state()->SetLiveBytes(p, 0);
4044     CHECK(p->SweepingDone());
4045     space->ReleasePage(p);
4046   }
4047   old_space_evacuation_pages_.clear();
4048   compacting_ = false;
4049 }
4050 
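// Adds all non-evacuation-candidate pages of |space| to the sweeper. At most
// one completely empty page is kept around for fast allocation; further empty
// pages are released immediately.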
4051 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
4052   space->ClearAllocatorState();
4053 
4054   int will_be_swept = 0;
4055   bool unused_page_present = false;
4056 
4057   // Loop needs to support deletion if live bytes == 0 for a page.
4058   for (auto it = space->begin(); it != space->end();) {
4059     Page* p = *(it++);
4060     DCHECK(p->SweepingDone());
4061 
4062     if (p->IsEvacuationCandidate()) {
4063       // Will be processed in Evacuate.
4064       DCHECK(!evacuation_candidates_.empty());
4065       continue;
4066     }
4067 
4068     // One unused page is kept; all further unused pages are released rather than swept.
4069     if (non_atomic_marking_state()->live_bytes(p) == 0) {
4070       if (unused_page_present) {
4071         if (FLAG_gc_verbose) {
4072           PrintIsolate(isolate(), "sweeping: released page: %p",
4073                        static_cast<void*>(p));
4074         }
4075         space->memory_chunk_list().Remove(p);
4076         space->ReleasePage(p);
4077         continue;
4078       }
4079       unused_page_present = true;
4080     }
4081 
4082     sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
4083     will_be_swept++;
4084   }
4085 
4086   if (FLAG_gc_verbose) {
4087     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
4088                  space->name(), will_be_swept);
4089   }
4090 }
4091 
4092 void MarkCompactCollector::StartSweepSpaces() {
4093   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4094 #ifdef DEBUG
4095   state_ = SWEEP_SPACES;
4096 #endif
4097 
4098   {
4099     {
4100       GCTracer::Scope sweep_scope(heap()->tracer(),
4101                                   GCTracer::Scope::MC_SWEEP_OLD);
4102       StartSweepSpace(heap()->old_space());
4103     }
4104     {
4105       GCTracer::Scope sweep_scope(heap()->tracer(),
4106                                   GCTracer::Scope::MC_SWEEP_CODE);
4107       StartSweepSpace(heap()->code_space());
4108     }
4109     {
4110       GCTracer::Scope sweep_scope(heap()->tracer(),
4111                                   GCTracer::Scope::MC_SWEEP_MAP);
4112       StartSweepSpace(heap()->map_space());
4113     }
4114     sweeper()->StartSweeping();
4115   }
4116 }
4117 
4118 #ifdef ENABLE_MINOR_MC
4119 
4120 namespace {
4121 
4122 #ifdef VERIFY_HEAP
4123 
4124 class YoungGenerationMarkingVerifier : public MarkingVerifier {
4125  public:
4126   explicit YoungGenerationMarkingVerifier(Heap* heap)
4127       : MarkingVerifier(heap),
4128         marking_state_(
4129             heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
4130 
4131   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
4132       const MemoryChunk* chunk) override {
4133     return marking_state_->bitmap(chunk);
4134   }
4135 
4136   bool IsMarked(HeapObject object) override {
4137     return marking_state_->IsGrey(object);
4138   }
4139 
4140   bool IsBlackOrGrey(HeapObject object) override {
4141     return marking_state_->IsBlackOrGrey(object);
4142   }
4143 
4144   void Run() override {
4145     VerifyRoots();
4146     VerifyMarking(heap_->new_space());
4147   }
4148 
4149  protected:
4150   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4151     VerifyPointersImpl(start, end);
4152   }
4153 
4154   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4155     VerifyPointersImpl(start, end);
4156   }
4157 
4158   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4159     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4160     VerifyHeapObjectImpl(target);
4161   }
4162   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4163     VerifyHeapObjectImpl(rinfo->target_object());
4164   }
4165   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4166     VerifyPointersImpl(start, end);
4167   }
4168 
4169  private:
4170   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4171     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
4172   }
4173 
4174   template <typename TSlot>
4175   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
4176     for (TSlot slot = start; slot < end; ++slot) {
4177       typename TSlot::TObject object = *slot;
4178       HeapObject heap_object;
4179       // Minor MC treats weak references as strong.
4180       if (object.GetHeapObject(&heap_object)) {
4181         VerifyHeapObjectImpl(heap_object);
4182       }
4183     }
4184   }
4185 
4186   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4187 };
4188 
4189 class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
4190  public:
4191   explicit YoungGenerationEvacuationVerifier(Heap* heap)
4192       : EvacuationVerifier(heap) {}
4193 
4194   void Run() override {
4195     VerifyRoots();
4196     VerifyEvacuation(heap_->new_space());
4197     VerifyEvacuation(heap_->old_space());
4198     VerifyEvacuation(heap_->code_space());
4199     VerifyEvacuation(heap_->map_space());
4200   }
4201 
4202  protected:
4203   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4204     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
4205                   Heap::InToPage(heap_object));
4206   }
4207 
4208   template <typename TSlot>
4209   void VerifyPointersImpl(TSlot start, TSlot end) {
4210     for (TSlot current = start; current < end; ++current) {
4211       typename TSlot::TObject object = *current;
4212       HeapObject heap_object;
4213       if (object.GetHeapObject(&heap_object)) {
4214         VerifyHeapObjectImpl(heap_object);
4215       }
4216     }
4217   }
4218 
4219   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4220     VerifyPointersImpl(start, end);
4221   }
4222   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4223     VerifyPointersImpl(start, end);
4224   }
4225   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4226     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4227     VerifyHeapObjectImpl(target);
4228   }
4229   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4230     VerifyHeapObjectImpl(rinfo->target_object());
4231   }
4232   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4233     VerifyPointersImpl(start, end);
4234   }
4235 };
4236 
4237 #endif  // VERIFY_HEAP
4238 
4239 bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
4240   DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
4241   return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
4242                                              ->non_atomic_marking_state()
4243                                              ->IsGrey(HeapObject::cast(*p));
4244 }
4245 
4246 }  // namespace
4247 
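// Marking visitor for the young generation. Weak references are treated as
// strong; newly greyed objects are pushed onto the per-task marking worklist.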
4248 class YoungGenerationMarkingVisitor final
4249     : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
4250  public:
4251   YoungGenerationMarkingVisitor(
4252       MinorMarkCompactCollector::MarkingState* marking_state,
4253       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4254       : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
4255 
4256   V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
4257                                ObjectSlot end) final {
4258     VisitPointersImpl(host, start, end);
4259   }
4260 
4261   V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
4262                                MaybeObjectSlot end) final {
4263     VisitPointersImpl(host, start, end);
4264   }
4265 
4266   V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
4267     VisitPointerImpl(host, slot);
4268   }
4269 
4270   V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
4271     VisitPointerImpl(host, slot);
4272   }
4273 
4274   V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
4275     // Code objects are not expected in new space.
4276     UNREACHABLE();
4277   }
4278 
4279   V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
4280     // Code objects are not expected in new space.
4281     UNREACHABLE();
4282   }
4283 
4284   V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
4285     object.YoungMarkExtension();
4286     int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
4287     JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
4288     return size;
4289   }
4290 
4291  private:
4292   template <typename TSlot>
4293   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
4294     for (TSlot slot = start; slot < end; ++slot) {
4295       VisitPointer(host, slot);
4296     }
4297   }
4298 
4299   template <typename TSlot>
4300   V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
4301     typename TSlot::TObject target = *slot;
4302     if (Heap::InYoungGeneration(target)) {
4303       // Treat weak references as strong.
4304       // TODO(marja): Proper weakness handling for minor-mcs.
4305       HeapObject target_object = target.GetHeapObject();
4306       MarkObjectViaMarkingWorklist(target_object);
4307     }
4308   }
4309 
4310   inline void MarkObjectViaMarkingWorklist(HeapObject object) {
4311     if (marking_state_->WhiteToGrey(object)) {
4312       // Marking deque overflow is unsupported for the young generation.
4313       CHECK(worklist_.Push(object));
4314     }
4315   }
4316 
4317   MinorMarkCompactCollector::MarkingWorklist::View worklist_;
4318   MinorMarkCompactCollector::MarkingState* marking_state_;
4319 };
4320 
4321 void MinorMarkCompactCollector::SetUp() {}
4322 
4323 void MinorMarkCompactCollector::TearDown() {}
4324 
4325 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
4326     : MarkCompactCollectorBase(heap),
4327       worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
4328       main_marking_visitor_(new YoungGenerationMarkingVisitor(
4329           marking_state(), worklist_, kMainMarker)),
4330       page_parallel_job_semaphore_(0) {
4331   static_assert(
4332       kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
4333       "more marker tasks than marking deque can handle");
4334 }
4335 
4336 MinorMarkCompactCollector::~MinorMarkCompactCollector() {
4337   delete worklist_;
4338   delete main_marking_visitor_;
4339 }
4340 
4341 void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
4342   for (Page* p : sweep_to_iterate_pages_) {
4343     if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
4344       p->ClearFlag(Page::SWEEP_TO_ITERATE);
4345       non_atomic_marking_state()->ClearLiveness(p);
4346     }
4347   }
4348   sweep_to_iterate_pages_.clear();
4349 }
4350 
4351 void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
4352   heap_->array_buffer_sweeper()->RequestSweepYoung();
4353 }
4354 
4355 class YoungGenerationMigrationObserver final : public MigrationObserver {
4356  public:
4357   YoungGenerationMigrationObserver(Heap* heap,
4358                                    MarkCompactCollector* mark_compact_collector)
4359       : MigrationObserver(heap),
4360         mark_compact_collector_(mark_compact_collector) {}
4361 
4362   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
4363                    int size) final {
4364     // Migrate color to old generation marking in case the object survived young
4365     // generation garbage collection.
4366     if (heap_->incremental_marking()->IsMarking()) {
4367       DCHECK(
4368           heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
4369       heap_->incremental_marking()->TransferColor(src, dst);
4370     }
4371   }
4372 
4373  protected:
4374   base::Mutex mutex_;
4375   MarkCompactCollector* mark_compact_collector_;
4376 };
4377 
4378 class YoungGenerationRecordMigratedSlotVisitor final
4379     : public RecordMigratedSlotVisitor {
4380  public:
4381   explicit YoungGenerationRecordMigratedSlotVisitor(
4382       MarkCompactCollector* collector)
4383       : RecordMigratedSlotVisitor(collector, nullptr) {}
4384 
4385   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
4386   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
4387     UNREACHABLE();
4388   }
4389 
4390   void MarkArrayBufferExtensionPromoted(HeapObject object) final {
4391     if (!object.IsJSArrayBuffer()) return;
4392     JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
4393   }
4394 
4395  private:
4396   // Only record slots for host objects that are considered as live by the full
4397   // collector.
4398   inline bool IsLive(HeapObject object) {
4399     return collector_->non_atomic_marking_state()->IsBlack(object);
4400   }
4401 
4402   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
4403                                  Address slot) final {
4404     if (value->IsStrongOrWeak()) {
4405       BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
4406       if (p->InYoungGeneration()) {
4407         DCHECK_IMPLIES(
4408             p->IsToPage(),
4409             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
4410         MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
4411         DCHECK(chunk->SweepingDone());
4412         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
4413       } else if (p->IsEvacuationCandidate() && IsLive(host)) {
4414         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
4415             MemoryChunk::FromHeapObject(host), slot);
4416       }
4417     }
4418   }
4419 };
4420 
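// Minor-MC variant of pointer updating: only OLD_TO_NEW remembered sets and
// to-space pages need to be visited, and old-generation roots are skipped.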
4421 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
4422   TRACE_GC(heap()->tracer(),
4423            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
4424 
4425   PointersUpdatingVisitor updating_visitor(isolate());
4426   std::vector<std::unique_ptr<UpdatingItem>> updating_items;
4427 
4428   // Collect to-space and remembered-set updating items.
4429   CollectToSpaceUpdatingItems(&updating_items);
4430   CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
4431                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4432   CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
4433                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4434   CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
4435                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4436   CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
4437                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4438   CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
4439                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
4440 
4441   {
4442     TRACE_GC(heap()->tracer(),
4443              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
4444     heap()->IterateRoots(&updating_visitor,
4445                          base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
4446                                                  SkipRoot::kOldGeneration});
4447   }
4448   {
4449     TRACE_GC(heap()->tracer(),
4450              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
4451     V8::GetCurrentPlatform()
4452         ->PostJob(
4453             v8::TaskPriority::kUserBlocking,
4454             std::make_unique<PointersUpdatingJob>(
4455                 isolate(), std::move(updating_items), old_to_new_slots_,
4456                 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
4457                 GCTracer::BackgroundScope::
4458                     MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
4459         ->Join();
4460   }
4461 
4462   {
4463     TRACE_GC(heap()->tracer(),
4464              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
4465 
4466     EvacuationWeakObjectRetainer evacuation_object_retainer;
4467     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4468 
4469     // Update pointers from external string table.
4470     heap()->UpdateYoungReferencesInExternalStringTable(
4471         &UpdateReferenceInExternalStringTableEntry);
4472   }
4473 }
4474 
4475 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
4476  public:
4477   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
4478       : collector_(collector) {}
4479 
4480   void VisitRootPointer(Root root, const char* description,
4481                         FullObjectSlot p) final {
4482     MarkObjectByPointer(p);
4483   }
4484 
4485   void VisitRootPointers(Root root, const char* description,
4486                          FullObjectSlot start, FullObjectSlot end) final {
4487     for (FullObjectSlot p = start; p < end; ++p) {
4488       MarkObjectByPointer(p);
4489     }
4490   }
4491 
4492  private:
4493   V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
4494     if (!(*p).IsHeapObject()) return;
4495     collector_->MarkRootObject(HeapObject::cast(*p));
4496   }
4497   MinorMarkCompactCollector* const collector_;
4498 };
4499 
4500 void MinorMarkCompactCollector::CollectGarbage() {
4501   {
4502     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
4503     heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
4504     CleanupSweepToIteratePages();
4505   }
4506 
4507   heap()->array_buffer_sweeper()->EnsureFinished();
4508 
4509   MarkLiveObjects();
4510   ClearNonLiveReferences();
4511 #ifdef VERIFY_HEAP
4512   if (FLAG_verify_heap) {
4513     YoungGenerationMarkingVerifier verifier(heap());
4514     verifier.Run();
4515   }
4516 #endif  // VERIFY_HEAP
4517 
4518   Evacuate();
4519 #ifdef VERIFY_HEAP
4520   if (FLAG_verify_heap) {
4521     YoungGenerationEvacuationVerifier verifier(heap());
4522     verifier.Run();
4523   }
4524 #endif  // VERIFY_HEAP
4525 
4526   {
4527     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
4528     heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
4529   }
4530 
4531   {
4532     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
4533     for (Page* p :
4534          PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
4535       DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
4536       non_atomic_marking_state()->ClearLiveness(p);
4537       if (FLAG_concurrent_marking) {
4538         // Ensure that concurrent marker does not track pages that are
4539         // going to be unmapped.
4540         heap()->concurrent_marking()->ClearMemoryChunkData(p);
4541       }
4542     }
4543     // Since we promote all surviving large objects immediately, all remaining
4544     // large objects must be dead.
4545     // TODO(ulan): Don't free all as soon as we have an intermediate generation.
4546     heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
4547   }
4548 
4549   SweepArrayBufferExtensions();
4550 }
4551 
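// Makes |p| iterable by the full collector: unmarked ranges between grey
// objects are turned into filler objects (optionally zapped), and the full
// collector's mark bits for those ranges are cleared.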
4552 void MinorMarkCompactCollector::MakeIterable(
4553     Page* p, MarkingTreatmentMode marking_mode,
4554     FreeSpaceTreatmentMode free_space_mode) {
4555   CHECK(!p->IsLargePage());
4556   // We have to clear the full collector's mark bits for the areas that we
4557   // remove here.
4558   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
4559   Address free_start = p->area_start();
4560 
4561   for (auto object_and_size :
4562        LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
4563     HeapObject const object = object_and_size.first;
4564     DCHECK(non_atomic_marking_state()->IsGrey(object));
4565     Address free_end = object.address();
4566     if (free_end != free_start) {
4567       CHECK_GT(free_end, free_start);
4568       size_t size = static_cast<size_t>(free_end - free_start);
4569       full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4570           p->AddressToMarkbitIndex(free_start),
4571           p->AddressToMarkbitIndex(free_end));
4572       if (free_space_mode == ZAP_FREE_SPACE) {
4573         ZapCode(free_start, size);
4574       }
4575       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4576                                       ClearRecordedSlots::kNo);
4577     }
4578     Map map = object.synchronized_map();
4579     int size = object.SizeFromMap(map);
4580     free_start = free_end + size;
4581   }
4582 
4583   if (free_start != p->area_end()) {
4584     CHECK_GT(p->area_end(), free_start);
4585     size_t size = static_cast<size_t>(p->area_end() - free_start);
4586     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
4587         p->AddressToMarkbitIndex(free_start),
4588         p->AddressToMarkbitIndex(p->area_end()));
4589     if (free_space_mode == ZAP_FREE_SPACE) {
4590       ZapCode(free_start, size);
4591     }
4592     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
4593                                     ClearRecordedSlots::kNo);
4594   }
4595 
4596   if (marking_mode == MarkingTreatmentMode::CLEAR) {
4597     non_atomic_marking_state()->ClearLiveness(p);
4598     p->ClearFlag(Page::SWEEP_TO_ITERATE);
4599   }
4600 }
4601 
4602 namespace {
4603 
4604 // Helper class for pruning the external string table.
4605 class YoungGenerationExternalStringTableCleaner : public RootVisitor {
4606  public:
4607   YoungGenerationExternalStringTableCleaner(
4608       MinorMarkCompactCollector* collector)
4609       : heap_(collector->heap()),
4610         marking_state_(collector->non_atomic_marking_state()) {}
4611 
4612   void VisitRootPointers(Root root, const char* description,
4613                          FullObjectSlot start, FullObjectSlot end) override {
4614     DCHECK_EQ(static_cast<int>(root),
4615               static_cast<int>(Root::kExternalStringsTable));
4616     // Visit all HeapObject pointers in [start, end).
4617     for (FullObjectSlot p = start; p < end; ++p) {
4618       Object o = *p;
4619       if (o.IsHeapObject()) {
4620         HeapObject heap_object = HeapObject::cast(o);
4621         if (marking_state_->IsWhite(heap_object)) {
4622           if (o.IsExternalString()) {
4623             heap_->FinalizeExternalString(String::cast(*p));
4624           } else {
4625             // The original external string may have been internalized.
4626             DCHECK(o.IsThinString());
4627           }
4628           // Set the entry to the_hole_value (as deleted).
4629           p.store(ReadOnlyRoots(heap_).the_hole_value());
4630         }
4631       }
4632     }
4633   }
4634 
4635  private:
4636   Heap* heap_;
4637   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4638 };
4639 
4640 // Marked young generation objects and all old generation objects will be
4641 // retained.
4642 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
4643  public:
4644   explicit MinorMarkCompactWeakObjectRetainer(
4645       MinorMarkCompactCollector* collector)
4646       : marking_state_(collector->non_atomic_marking_state()) {}
4647 
4648   Object RetainAs(Object object) override {
4649     HeapObject heap_object = HeapObject::cast(object);
4650     if (!Heap::InYoungGeneration(heap_object)) return object;
4651 
4652     // Young generation marking only marks to grey instead of black.
4653     DCHECK(!marking_state_->IsBlack(heap_object));
4654     if (marking_state_->IsGrey(heap_object)) {
4655       return object;
4656     }
4657     return Object();
4658   }
4659 
4660  private:
4661   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4662 };
4663 
4664 }  // namespace
4665 
4666 void MinorMarkCompactCollector::ClearNonLiveReferences() {
4667   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
4668 
4669   {
4670     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
4671     // Internalized strings are always stored in old space, so there is no need
4672     // to clean them here.
4673     YoungGenerationExternalStringTableCleaner external_visitor(this);
4674     heap()->external_string_table_.IterateYoung(&external_visitor);
4675     heap()->external_string_table_.CleanUpYoung();
4676   }
4677 
4678   {
4679     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
4680     // Process the weak references.
4681     MinorMarkCompactWeakObjectRetainer retainer(this);
4682     heap()->ProcessYoungWeakReferences(&retainer);
4683   }
4684 }
4685 
4686 void MinorMarkCompactCollector::EvacuatePrologue() {
4687   NewSpace* new_space = heap()->new_space();
4688   // Append the list of new space pages to be processed.
4689   for (Page* p :
4690        PageRange(new_space->first_allocatable_address(), new_space->top())) {
4691     new_space_evacuation_pages_.push_back(p);
4692   }
4693 
4694   new_space->Flip();
4695   new_space->ResetLinearAllocationArea();
4696 
4697   heap()->new_lo_space()->Flip();
4698   heap()->new_lo_space()->ResetPendingObject();
4699 }
4700 
4701 void MinorMarkCompactCollector::EvacuateEpilogue() {
4702   heap()->new_space()->set_age_mark(heap()->new_space()->top());
4703   // Give pages that are queued to be freed back to the OS.
4704   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4705 }
4706 
4707 std::unique_ptr<UpdatingItem>
4708 MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
4709                                                      Address start,
4710                                                      Address end) {
4711   return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
4712       chunk, start, end, non_atomic_marking_state());
4713 }
4714 
4715 std::unique_ptr<UpdatingItem>
4716 MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
4717     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4718   return std::make_unique<
4719       RememberedSetUpdatingItem<NonAtomicMarkingState, MINOR_MARK_COMPACTOR>>(
4720       heap(), non_atomic_marking_state(), chunk, updating_mode);
4721 }
4722 
4723 class PageMarkingItem;
4724 class RootMarkingItem;
4725 class YoungGenerationMarkingTask;
4726 
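// Per-worker state for parallel young-generation marking: a worklist view,
// a marking visitor, and page-local live-byte counts that are flushed to the
// marking state once the task is done.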
4727 class YoungGenerationMarkingTask {
4728  public:
4729   YoungGenerationMarkingTask(
4730       Isolate* isolate, MinorMarkCompactCollector* collector,
4731       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4732       : marking_worklist_(global_worklist, task_id),
4733         marking_state_(collector->marking_state()),
4734         visitor_(marking_state_, global_worklist, task_id) {
4735     local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
4736                               Page::kPageSize);
4737   }
4738 
4739   int slots() const { return slots_; }
4740 
4741   void IncrementSlots() { ++slots_; }
4742 
4743   void MarkObject(Object object) {
4744     if (!Heap::InYoungGeneration(object)) return;
4745     HeapObject heap_object = HeapObject::cast(object);
4746     if (marking_state_->WhiteToGrey(heap_object)) {
4747       const int size = visitor_.Visit(heap_object);
4748       IncrementLiveBytes(heap_object, size);
4749     }
4750   }
4751 
4752   void EmptyMarkingWorklist() {
4753     HeapObject object;
4754     while (marking_worklist_.Pop(&object)) {
4755       const int size = visitor_.Visit(object);
4756       IncrementLiveBytes(object, size);
4757     }
4758   }
4759 
4760   void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
4761     local_live_bytes_[Page::FromHeapObject(object)] += bytes;
4762   }
4763 
4764   void FlushLiveBytes() {
4765     for (auto pair : local_live_bytes_) {
4766       marking_state_->IncrementLiveBytes(pair.first, pair.second);
4767     }
4768   }
4769 
4770  private:
4771   MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
4772   MinorMarkCompactCollector::MarkingState* marking_state_;
4773   YoungGenerationMarkingVisitor visitor_;
4774   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
4775   int slots_ = 0;
4776 };
4777 
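// Work item that marks young-generation objects reachable from one page's
// OLD_TO_NEW remembered sets (untyped, sweeping, and typed). Slots whose
// targets are no longer in the young generation are removed.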
4778 class PageMarkingItem : public ParallelWorkItem {
4779  public:
4780   explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
4781   ~PageMarkingItem() = default;
4782 
4783   void Process(YoungGenerationMarkingTask* task) {
4784     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4785                  "PageMarkingItem::Process");
4786     base::MutexGuard guard(chunk_->mutex());
4787     MarkUntypedPointers(task);
4788     MarkTypedPointers(task);
4789   }
4790 
4791  private:
4792   inline Heap* heap() { return chunk_->heap(); }
4793 
4794   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
4795     InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
4796     RememberedSet<OLD_TO_NEW>::Iterate(
4797         chunk_,
4798         [this, task, &filter](MaybeObjectSlot slot) {
4799           if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
4800           return CheckAndMarkObject(task, slot);
4801         },
4802         SlotSet::FREE_EMPTY_BUCKETS);
4803     filter = InvalidatedSlotsFilter::OldToNew(chunk_);
4804     RememberedSetSweeping::Iterate(
4805         chunk_,
4806         [this, task, &filter](MaybeObjectSlot slot) {
4807           if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
4808           return CheckAndMarkObject(task, slot);
4809         },
4810         SlotSet::FREE_EMPTY_BUCKETS);
4811   }
4812 
4813   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
4814     RememberedSet<OLD_TO_NEW>::IterateTyped(
4815         chunk_, [=](SlotType slot_type, Address slot) {
4816           return UpdateTypedSlotHelper::UpdateTypedSlot(
4817               heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
4818                 return CheckAndMarkObject(task, slot);
4819               });
4820         });
4821   }
4822 
4823   template <typename TSlot>
4824   V8_INLINE SlotCallbackResult
4825   CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
4826     static_assert(
4827         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
4828             std::is_same<TSlot, MaybeObjectSlot>::value,
4829         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
4830     MaybeObject object = *slot;
4831     if (Heap::InYoungGeneration(object)) {
4832       // Marking happens before flipping the young generation, so the object
4833       // has to be in a to-page.
4834       DCHECK(Heap::InToPage(object));
4835       HeapObject heap_object;
4836       bool success = object.GetHeapObject(&heap_object);
4837       USE(success);
4838       DCHECK(success);
4839       task->MarkObject(heap_object);
4840       task->IncrementSlots();
4841       return KEEP_SLOT;
4842     }
4843     return REMOVE_SLOT;
4844   }
4845 
4846   MemoryChunk* chunk_;
4847 };
4848 
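// Job that drains the seeded page marking items in parallel. Concurrency is
// derived from the remaining items and the global worklist size, e.g. with
// kPagesPerTask == 2, seven remaining items and an empty global pool yield
// (7 + 1) / 2 = 4 workers (capped at MarkingWorklist::kMaxNumTasks).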
4849 class YoungGenerationMarkingJob : public v8::JobTask {
4850  public:
4851   YoungGenerationMarkingJob(
4852       Isolate* isolate, MinorMarkCompactCollector* collector,
4853       MinorMarkCompactCollector::MarkingWorklist* global_worklist,
4854       std::vector<PageMarkingItem> marking_items, std::atomic<int>* slots)
4855       : isolate_(isolate),
4856         collector_(collector),
4857         global_worklist_(global_worklist),
4858         marking_items_(std::move(marking_items)),
4859         remaining_marking_items_(marking_items_.size()),
4860         generator_(marking_items_.size()),
4861         slots_(slots) {}
4862 
4863   void Run(JobDelegate* delegate) override {
4864     if (delegate->IsJoiningThread()) {
4865       TRACE_GC(collector_->heap()->tracer(),
4866                GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
4867       ProcessItems(delegate);
4868     } else {
4869       TRACE_BACKGROUND_GC(
4870           collector_->heap()->tracer(),
4871           GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
4872       ProcessItems(delegate);
4873     }
4874   }
4875 
4876   size_t GetMaxConcurrency(size_t worker_count) const override {
4877     // Pages are not private to markers but we can still use them to estimate
4878     // the amount of marking that is required.
4879     const int kPagesPerTask = 2;
4880     size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
4881     size_t num_tasks = std::max((items + 1) / kPagesPerTask,
4882                                 global_worklist_->GlobalPoolSize());
4883     return std::min<size_t>(
4884         num_tasks, MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks);
4885   }
4886 
4887  private:
4888   void ProcessItems(JobDelegate* delegate) {
4889     double marking_time = 0.0;
4890     {
4891       TimedScope scope(&marking_time);
4892       YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_,
4893                                       delegate->GetTaskId());
4894       ProcessMarkingItems(&task);
4895       task.EmptyMarkingWorklist();
4896       task.FlushLiveBytes();
4897       *slots_ += task.slots();
4898     }
4899     if (FLAG_trace_minor_mc_parallel_marking) {
4900       PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
4901                    static_cast<void*>(this), marking_time);
4902     }
4903   }
4904 
4905   void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
4906     while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
4907       base::Optional<size_t> index = generator_.GetNext();
4908       if (!index) return;
4909       for (size_t i = *index; i < marking_items_.size(); ++i) {
4910         auto& work_item = marking_items_[i];
4911         if (!work_item.TryAcquire()) break;
4912         work_item.Process(task);
4913         task->EmptyMarkingWorklist();
4914         if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
4915             1) {
4916           return;
4917         }
4918       }
4919     }
4920   }
4921 
4922   Isolate* isolate_;
4923   MinorMarkCompactCollector* collector_;
4924   MinorMarkCompactCollector::MarkingWorklist* global_worklist_;
4925   std::vector<PageMarkingItem> marking_items_;
4926   std::atomic_size_t remaining_marking_items_{0};
4927   IndexGenerator generator_;
4928   std::atomic<int>* slots_;
4929 };
4930 
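// Seeds marking with the root set and the old->new remembered set, then
// processes the seeded items in a parallel YoungGenerationMarkingJob. The
// number of surviving old->new slots is recorded for later pointer updates.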
4931 void MinorMarkCompactCollector::MarkRootSetInParallel(
4932     RootMarkingVisitor* root_visitor) {
4933   std::atomic<int> slots;
4934   {
4935     std::vector<PageMarkingItem> marking_items;
4936 
4937     // Seed the root set (roots + old->new set).
4938     {
4939       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
4940       isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
4941           &JSObject::IsUnmodifiedApiObject);
4942       // MinorMC treats all weak roots except for global handles as strong.
4943       // That is why we don't set skip_weak = true here and instead visit
4944       // global handles separately.
4945       heap()->IterateRoots(
4946           root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
4947                                                 SkipRoot::kGlobalHandles,
4948                                                 SkipRoot::kOldGeneration});
4949       isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
4950           root_visitor);
4951       // Create items for each page.
4952       RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4953           heap(), [&marking_items](MemoryChunk* chunk) {
4954             marking_items.emplace_back(chunk);
4955           });
4956     }
4957 
4958     // Add tasks and run in parallel.
4959     {
4960       // The main thread might hold local items, while GlobalPoolSize() == 0.
4961       // Flush to ensure these items are visible globally and picked up by the
4962       // job.
4963       worklist()->FlushToGlobal(kMainThreadTask);
4964       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
4965       V8::GetCurrentPlatform()
4966           ->PostJob(v8::TaskPriority::kUserBlocking,
4967                     std::make_unique<YoungGenerationMarkingJob>(
4968                         isolate(), this, worklist(), std::move(marking_items),
4969                         &slots))
4970           ->Join();
4971 
4972       DCHECK(worklist()->IsEmpty());
4973     }
4974   }
4975   old_to_new_slots_ = slots;
4976 }
4977 
4978 void MinorMarkCompactCollector::MarkLiveObjects() {
4979   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
4980 
4981   PostponeInterruptsScope postpone(isolate());
4982 
4983   RootMarkingVisitor root_visitor(this);
4984 
4985   MarkRootSetInParallel(&root_visitor);
4986 
4987   // Mark the rest on the main thread.
4988   {
4989     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
4990     DrainMarkingWorklist();
4991   }
4992 
4993   {
4994     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
4995     isolate()->global_handles()->MarkYoungWeakDeadObjectsPending(
4996         &IsUnmarkedObjectForYoungGeneration);
4997     isolate()->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
4998         &root_visitor);
4999     isolate()->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
5000         &root_visitor, &IsUnmarkedObjectForYoungGeneration);
5001     DrainMarkingWorklist();
5002   }
5003 
5004   if (FLAG_minor_mc_trace_fragmentation) {
5005     TraceFragmentation();
5006   }
5007 }
5008 
5009 void MinorMarkCompactCollector::DrainMarkingWorklist() {
5010   MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
5011   HeapObject object;
5012   while (marking_worklist.Pop(&object)) {
5013     DCHECK(!object.IsFreeSpaceOrFiller());
5014     DCHECK(object.IsHeapObject());
5015     DCHECK(heap()->Contains(object));
5016     DCHECK(non_atomic_marking_state()->IsGrey(object));
5017     main_marking_visitor()->Visit(object);
5018   }
5019   DCHECK(marking_worklist.IsLocalEmpty());
5020 }
5021 
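// Reports new-space fragmentation: for each page, the free gaps between grey
// objects are accumulated into size classes (>=0, >=1K, >=2K, >=4K bytes) and
// printed together with the live and allocatable byte counts.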
5022 void MinorMarkCompactCollector::TraceFragmentation() {
5023   NewSpace* new_space = heap()->new_space();
5024   const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
5025   size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
5026   size_t live_bytes = 0;
5027   size_t allocatable_bytes = 0;
5028   for (Page* p :
5029        PageRange(new_space->first_allocatable_address(), new_space->top())) {
5030     Address free_start = p->area_start();
5031     for (auto object_and_size : LiveObjectRange<kGreyObjects>(
5032              p, non_atomic_marking_state()->bitmap(p))) {
5033       HeapObject const object = object_and_size.first;
5034       Address free_end = object.address();
5035       if (free_end != free_start) {
5036         size_t free_bytes = free_end - free_start;
5037         int free_bytes_index = 0;
5038         for (auto free_size_class_limit : free_size_class_limits) {
5039           if (free_bytes >= free_size_class_limit) {
5040             free_bytes_of_class[free_bytes_index] += free_bytes;
5041           }
5042           free_bytes_index++;
5043         }
5044       }
5045       Map map = object.synchronized_map();
5046       int size = object.SizeFromMap(map);
5047       live_bytes += size;
5048       free_start = free_end + size;
5049     }
5050     size_t area_end =
5051         p->Contains(new_space->top()) ? new_space->top() : p->area_end();
5052     if (free_start != area_end) {
5053       size_t free_bytes = area_end - free_start;
5054       int free_bytes_index = 0;
5055       for (auto free_size_class_limit : free_size_class_limits) {
5056         if (free_bytes >= free_size_class_limit) {
5057           free_bytes_of_class[free_bytes_index] += free_bytes;
5058         }
5059         free_bytes_index++;
5060       }
5061     }
5062     allocatable_bytes += area_end - p->area_start();
5063     CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
5064   }
5065   PrintIsolate(
5066       isolate(),
5067       "Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
5068       "free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
5069       allocatable_bytes, live_bytes, free_bytes_of_class[0],
5070       free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
5071 }
5072 
5073 void MinorMarkCompactCollector::Evacuate() {
5074   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
5075   base::MutexGuard guard(heap()->relocation_mutex());
5076 
5077   {
5078     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
5079     EvacuatePrologue();
5080   }
5081 
5082   {
5083     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
5084     EvacuatePagesInParallel();
5085   }
5086 
5087   UpdatePointersAfterEvacuation();
5088 
5089   {
5090     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
5091     if (!heap()->new_space()->Rebalance()) {
5092       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
5093     }
5094   }
5095 
5096   {
5097     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
5098     for (Page* p : new_space_evacuation_pages_) {
5099       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
5100           p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
5101         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
5102         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
5103         p->SetFlag(Page::SWEEP_TO_ITERATE);
5104         sweep_to_iterate_pages_.push_back(p);
5105       }
5106     }
5107     new_space_evacuation_pages_.clear();
5108   }
5109 
5110   {
5111     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
5112     EvacuateEpilogue();
5113   }
5114 }
5115 
5116 namespace {
5117 
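// Evacuator used by the minor collector. It visits the grey objects marked
// during young-generation marking and records migrated slots via the full
// collector's remembered sets.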
5118 class YoungGenerationEvacuator : public Evacuator {
5119  public:
5120   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
5121       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
5122                   false),
5123         record_visitor_(collector->heap()->mark_compact_collector()),
5124         local_allocator_(heap_,
5125                          LocalSpaceKind::kCompactionSpaceForMinorMarkCompact),
5126         collector_(collector) {}
5127 
5128   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
5129     return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
5130   }
5131 
5132   GCTracer::Scope::ScopeId GetTracingScope() override {
5133     return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
5134   }
5135 
5136  protected:
5137   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
5138 
5139   YoungGenerationRecordMigratedSlotVisitor record_visitor_;
5140   EvacuationAllocator local_allocator_;
5141   MinorMarkCompactCollector* collector_;
5142 };
5143 
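// Processes a single page depending on its evacuation mode: on
// kObjectsNewToOld pages the live objects are evacuated individually, while
// kPageNewToOld and kPageNewToNew pages were already moved as a whole and
// only have their live objects revisited and their moved bytes accounted.
// Old-to-old evacuation cannot occur for the young generation.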
void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                               intptr_t* live_bytes) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "YoungGenerationEvacuator::RawEvacuatePage");
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(chunk);
  switch (ComputeEvacuationMode(chunk)) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      if (!chunk->IsLargePage()) {
        if (heap()->ShouldZapGarbage()) {
          collector_->MakeIterable(static_cast<Page*>(chunk),
                                   MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
        } else if (heap()->incremental_marking()->IsMarking()) {
          // When incremental marking is on, we need to clear the mark bits of
          // the full collector. We cannot yet discard the young generation
          // mark bits as they are still needed for updating pointers.
          collector_->MakeIterable(static_cast<Page*>(chunk),
                                   MarkingTreatmentMode::KEEP,
                                   IGNORE_FREE_SPACE);
        }
      }
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      DCHECK(!chunk->IsLargePage());
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(static_cast<Page*>(chunk),
                                 MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // When incremental marking is on, we need to clear the mark bits of
        // the full collector. We cannot yet discard the young generation mark
        // bits as they are still needed for updating pointers.
        collector_->MakeIterable(static_cast<Page*>(chunk),
                                 MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
      }
      break;
    case kObjectsOldToOld:
      UNREACHABLE();
  }
}

}  // namespace

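// Collects the set of new space pages and young generation large objects to
// evacuate, decides which pages can be moved in their entirety instead of
// copying their objects, and spawns the parallel evacuation tasks.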
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
  std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
  intptr_t live_bytes = 0;

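  // Pages selected by ShouldMovePage are moved in their entirety: pages below
  // the age mark are promoted to the old generation, the others remain in new
  // space. All pages with live objects are handed to the evacuation tasks.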
  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    if (live_bytes_on_page == 0) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page, false)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_items.emplace_back(ParallelWorkItem{}, page);
  }

  // Promote young generation large objects.
  for (auto it = heap()->new_lo_space()->begin();
       it != heap()->new_lo_space()->end();) {
    LargePage* current = *it;
    it++;
    HeapObject object = current->GetObject();
    DCHECK(!non_atomic_marking_state_.IsBlack(object));
    if (non_atomic_marking_state_.IsGrey(object)) {
      heap_->lo_space()->PromoteNewLargeObject(current);
      current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
      evacuation_items.emplace_back(ParallelWorkItem{}, current);
    }
  }
  if (evacuation_items.empty()) return;

  YoungGenerationMigrationObserver observer(heap(),
                                            heap()->mark_compact_collector());
  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
      this, std::move(evacuation_items), &observer, live_bytes);
}

#endif  // ENABLE_MINOR_MC

}  // namespace internal
}  // namespace v8