1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include <unordered_map>
8 #include <unordered_set>
9 
10 #include "src/base/logging.h"
11 #include "src/base/optional.h"
12 #include "src/base/utils/random-number-generator.h"
13 #include "src/codegen/compilation-cache.h"
14 #include "src/common/globals.h"
15 #include "src/deoptimizer/deoptimizer.h"
16 #include "src/execution/execution.h"
17 #include "src/execution/frames-inl.h"
18 #include "src/execution/isolate-utils-inl.h"
19 #include "src/execution/isolate-utils.h"
20 #include "src/execution/vm-state-inl.h"
21 #include "src/handles/global-handles.h"
22 #include "src/heap/array-buffer-sweeper.h"
23 #include "src/heap/basic-memory-chunk.h"
24 #include "src/heap/code-object-registry.h"
25 #include "src/heap/concurrent-allocator.h"
26 #include "src/heap/evacuation-allocator-inl.h"
27 #include "src/heap/gc-tracer-inl.h"
28 #include "src/heap/gc-tracer.h"
29 #include "src/heap/heap.h"
30 #include "src/heap/incremental-marking-inl.h"
31 #include "src/heap/index-generator.h"
32 #include "src/heap/invalidated-slots-inl.h"
33 #include "src/heap/large-spaces.h"
34 #include "src/heap/mark-compact-inl.h"
35 #include "src/heap/marking-barrier.h"
36 #include "src/heap/marking-visitor-inl.h"
37 #include "src/heap/marking-visitor.h"
38 #include "src/heap/memory-chunk-layout.h"
39 #include "src/heap/memory-measurement-inl.h"
40 #include "src/heap/memory-measurement.h"
41 #include "src/heap/object-stats.h"
42 #include "src/heap/objects-visiting-inl.h"
43 #include "src/heap/parallel-work-item.h"
44 #include "src/heap/read-only-heap.h"
45 #include "src/heap/read-only-spaces.h"
46 #include "src/heap/safepoint.h"
47 #include "src/heap/slot-set.h"
48 #include "src/heap/spaces-inl.h"
49 #include "src/heap/sweeper.h"
50 #include "src/heap/weak-object-worklists.h"
51 #include "src/ic/stub-cache.h"
52 #include "src/init/v8.h"
53 #include "src/logging/tracing-flags.h"
54 #include "src/objects/embedder-data-array-inl.h"
55 #include "src/objects/foreign.h"
56 #include "src/objects/hash-table-inl.h"
57 #include "src/objects/instance-type.h"
58 #include "src/objects/js-array-buffer-inl.h"
59 #include "src/objects/js-objects-inl.h"
60 #include "src/objects/maybe-object.h"
61 #include "src/objects/objects.h"
62 #include "src/objects/slots-inl.h"
63 #include "src/objects/smi.h"
64 #include "src/objects/transitions-inl.h"
65 #include "src/objects/visitors.h"
66 #include "src/snapshot/shared-heap-serializer.h"
67 #include "src/tasks/cancelable-task.h"
68 #include "src/tracing/tracing-category-observer.h"
69 #include "src/utils/utils-inl.h"
70 
71 namespace v8 {
72 namespace internal {
73 
74 const char* Marking::kWhiteBitPattern = "00";
75 const char* Marking::kBlackBitPattern = "11";
76 const char* Marking::kGreyBitPattern = "10";
77 const char* Marking::kImpossibleBitPattern = "01";
78 
79 // The following has to hold in order for {MarkingState::MarkBitFrom} to not
80 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
81 STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
82 
83 // =============================================================================
84 // Verifiers
85 // =============================================================================
86 
87 #ifdef VERIFY_HEAP
88 namespace {
89 
90 class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
91  public:
92   virtual void Run() = 0;
93 
94  protected:
95   explicit MarkingVerifier(Heap* heap)
96       : ObjectVisitorWithCageBases(heap), heap_(heap) {}
97 
98   virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
99       const MemoryChunk* chunk) = 0;
100 
101   virtual void VerifyMap(Map map) = 0;
102   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
103   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
104   virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
105   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
106 
107   virtual bool IsMarked(HeapObject object) = 0;
108 
109   virtual bool IsBlackOrGrey(HeapObject object) = 0;
110 
111   void VisitPointers(HeapObject host, ObjectSlot start,
112                      ObjectSlot end) override {
113     VerifyPointers(start, end);
114   }
115 
116   void VisitPointers(HeapObject host, MaybeObjectSlot start,
117                      MaybeObjectSlot end) override {
118     VerifyPointers(start, end);
119   }
120 
121   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
122     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
123     VerifyCodePointer(slot);
124   }
125 
126   void VisitRootPointers(Root root, const char* description,
127                          FullObjectSlot start, FullObjectSlot end) override {
128     VerifyRootPointers(start, end);
129   }
130 
131   void VisitMapPointer(HeapObject object) override {
132     VerifyMap(object.map(cage_base()));
133   }
134 
135   void VerifyRoots();
136   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
137   void VerifyMarking(NewSpace* new_space);
138   void VerifyMarking(PagedSpace* paged_space);
139   void VerifyMarking(LargeObjectSpace* lo_space);
140 
141   Heap* heap_;
142 };
143 
144 void MarkingVerifier::VerifyRoots() {
145   heap_->IterateRootsIncludingClients(this,
146                                       base::EnumSet<SkipRoot>{SkipRoot::kWeak});
147 }
148 
149 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
150                                           Address end) {
151   Address next_object_must_be_here_or_later = start;
152 
153   for (auto object_and_size :
154        LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
155     HeapObject object = object_and_size.first;
156     size_t size = object_and_size.second;
157     Address current = object.address();
158     if (current < start) continue;
159     if (current >= end) break;
160     CHECK(IsMarked(object));
161     CHECK(current >= next_object_must_be_here_or_later);
162     object.Iterate(cage_base(), this);
163     next_object_must_be_here_or_later = current + size;
164     // The object is either part of a black area of black allocation or a
165     // regular black object
166     CHECK(bitmap(page)->AllBitsSetInRange(
167               page->AddressToMarkbitIndex(current),
168               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
169           bitmap(page)->AllBitsClearInRange(
170               page->AddressToMarkbitIndex(current + kTaggedSize * 2),
171               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
172     current = next_object_must_be_here_or_later;
173   }
174 }
175 
176 void MarkingVerifier::VerifyMarking(NewSpace* space) {
177   if (!space) return;
178   Address end = space->top();
179   // The bottom position is at the start of its page. Allows us to use
180   // page->area_start() as start of range on all pages.
181   CHECK_EQ(space->first_allocatable_address(),
182            space->first_page()->area_start());
183 
184   PageRange range(space->first_allocatable_address(), end);
185   for (auto it = range.begin(); it != range.end();) {
186     Page* page = *(it++);
187     Address limit = it != range.end() ? page->area_end() : end;
188     CHECK(limit == end || !page->Contains(end));
189     VerifyMarkingOnPage(page, page->area_start(), limit);
190   }
191 }
192 
193 void MarkingVerifier::VerifyMarking(PagedSpace* space) {
194   for (Page* p : *space) {
195     VerifyMarkingOnPage(p, p->area_start(), p->area_end());
196   }
197 }
198 
199 void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
200   if (!lo_space) return;
201   LargeObjectSpaceObjectIterator it(lo_space);
202   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
203     if (IsBlackOrGrey(obj)) {
204       obj.Iterate(cage_base(), this);
205     }
206   }
207 }
208 
209 class FullMarkingVerifier : public MarkingVerifier {
210  public:
211   explicit FullMarkingVerifier(Heap* heap)
212       : MarkingVerifier(heap),
213         marking_state_(
214             heap->mark_compact_collector()->non_atomic_marking_state()) {}
215 
216   void Run() override {
217     VerifyRoots();
218     VerifyMarking(heap_->new_space());
219     VerifyMarking(heap_->new_lo_space());
220     VerifyMarking(heap_->old_space());
221     VerifyMarking(heap_->code_space());
222     if (heap_->map_space()) VerifyMarking(heap_->map_space());
223     VerifyMarking(heap_->lo_space());
224     VerifyMarking(heap_->code_lo_space());
225   }
226 
227  protected:
228   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
229       const MemoryChunk* chunk) override {
230     return marking_state_->bitmap(chunk);
231   }
232 
233   bool IsMarked(HeapObject object) override {
234     return marking_state_->IsBlack(object);
235   }
236 
237   bool IsBlackOrGrey(HeapObject object) override {
238     return marking_state_->IsBlackOrGrey(object);
239   }
240 
241   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
242 
243   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
244     VerifyPointersImpl(start, end);
245   }
246 
247   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
248     VerifyPointersImpl(start, end);
249   }
250 
251   void VerifyCodePointer(CodeObjectSlot slot) override {
252     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
253     Object maybe_code = slot.load(code_cage_base());
254     HeapObject code;
255     // The slot might contain smi during CodeDataContainer creation, so skip it.
256     if (maybe_code.GetHeapObject(&code)) {
257       VerifyHeapObjectImpl(code);
258     }
259   }
260 
261   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
262     VerifyPointersImpl(start, end);
263   }
264 
265   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
266     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
267     VerifyHeapObjectImpl(target);
268   }
269 
270   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
271     DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
272     HeapObject target_object = rinfo->target_object(cage_base());
273     if (!host.IsWeakObject(target_object)) {
274       VerifyHeapObjectImpl(target_object);
275     }
276   }
277 
278  private:
279   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
280     if (heap_->IsShared() !=
281         BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
282       return;
283 
284     if (heap_->ShouldBeInSharedOldSpace(heap_object)) {
285       CHECK(heap_->SharedHeapContains(heap_object));
286     }
287 
288     CHECK(marking_state_->IsBlackOrGrey(heap_object));
289   }
290 
291   template <typename TSlot>
292   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
293     for (TSlot slot = start; slot < end; ++slot) {
294       typename TSlot::TObject object = slot.load(cage_base());
295       HeapObject heap_object;
296       if (object.GetHeapObjectIfStrong(&heap_object)) {
297         VerifyHeapObjectImpl(heap_object);
298       }
299     }
300   }
301 
302   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
303 };
304 
305 class EvacuationVerifier : public ObjectVisitorWithCageBases,
306                            public RootVisitor {
307  public:
308   virtual void Run() = 0;
309 
310   void VisitPointers(HeapObject host, ObjectSlot start,
311                      ObjectSlot end) override {
312     VerifyPointers(start, end);
313   }
314 
315   void VisitPointers(HeapObject host, MaybeObjectSlot start,
316                      MaybeObjectSlot end) override {
317     VerifyPointers(start, end);
318   }
319 
320   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
321     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
322     VerifyCodePointer(slot);
323   }
324 
325   void VisitRootPointers(Root root, const char* description,
326                          FullObjectSlot start, FullObjectSlot end) override {
327     VerifyRootPointers(start, end);
328   }
329 
330   void VisitMapPointer(HeapObject object) override {
331     VerifyMap(object.map(cage_base()));
332   }
333 
334  protected:
335   explicit EvacuationVerifier(Heap* heap)
336       : ObjectVisitorWithCageBases(heap), heap_(heap) {}
337 
338   inline Heap* heap() { return heap_; }
339 
340   virtual void VerifyMap(Map map) = 0;
341   virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
342   virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
343   virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
344   virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;
345 
346   void VerifyRoots();
347   void VerifyEvacuationOnPage(Address start, Address end);
348   void VerifyEvacuation(NewSpace* new_space);
349   void VerifyEvacuation(PagedSpace* paged_space);
350 
351   Heap* heap_;
352 };
353 
354 void EvacuationVerifier::VerifyRoots() {
355   heap_->IterateRootsIncludingClients(this,
356                                       base::EnumSet<SkipRoot>{SkipRoot::kWeak});
357 }
358 
359 void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
360   Address current = start;
361   while (current < end) {
362     HeapObject object = HeapObject::FromAddress(current);
363     if (!object.IsFreeSpaceOrFiller(cage_base())) {
364       object.Iterate(cage_base(), this);
365     }
366     current += object.Size(cage_base());
367   }
368 }
369 
370 void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
371   if (!space) return;
372   PageRange range(space->first_allocatable_address(), space->top());
373   for (auto it = range.begin(); it != range.end();) {
374     Page* page = *(it++);
375     Address current = page->area_start();
376     Address limit = it != range.end() ? page->area_end() : space->top();
377     CHECK(limit == space->top() || !page->Contains(space->top()));
378     VerifyEvacuationOnPage(current, limit);
379   }
380 }
381 
382 void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
383   for (Page* p : *space) {
384     if (p->IsEvacuationCandidate()) continue;
385     if (p->Contains(space->top())) {
386       CodePageMemoryModificationScope memory_modification_scope(p);
387       heap_->CreateFillerObjectAt(
388           space->top(), static_cast<int>(space->limit() - space->top()),
389           ClearRecordedSlots::kNo);
390     }
391     VerifyEvacuationOnPage(p->area_start(), p->area_end());
392   }
393 }
394 
395 class FullEvacuationVerifier : public EvacuationVerifier {
396  public:
397   explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
398 
399   void Run() override {
400     DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
401     VerifyRoots();
402     VerifyEvacuation(heap_->new_space());
403     VerifyEvacuation(heap_->old_space());
404     VerifyEvacuation(heap_->code_space());
405     if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
406   }
407 
408  protected:
409   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
410     if (heap_->IsShared() !=
411         BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
412       return;
413 
414     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
415                   Heap::InToPage(heap_object));
416     CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
417   }
418 
419   template <typename TSlot>
420   void VerifyPointersImpl(TSlot start, TSlot end) {
421     for (TSlot current = start; current < end; ++current) {
422       typename TSlot::TObject object = current.load(cage_base());
423       HeapObject heap_object;
424       if (object.GetHeapObjectIfStrong(&heap_object)) {
425         VerifyHeapObjectImpl(heap_object);
426       }
427     }
428   }
429   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
430   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
431     VerifyPointersImpl(start, end);
432   }
433   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
434     VerifyPointersImpl(start, end);
435   }
436   void VerifyCodePointer(CodeObjectSlot slot) override {
437     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
438     Object maybe_code = slot.load(code_cage_base());
439     HeapObject code;
440     // The slot might contain smi during CodeDataContainer creation, so skip it.
441     if (maybe_code.GetHeapObject(&code)) {
442       VerifyHeapObjectImpl(code);
443     }
444   }
445   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
446     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
447     VerifyHeapObjectImpl(target);
448   }
449   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
450     VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
451   }
452   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
453     VerifyPointersImpl(start, end);
454   }
455 };
456 
457 }  // namespace
458 #endif  // VERIFY_HEAP
459 
460 // =============================================================================
461 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
462 // =============================================================================
463 
464 namespace {
465 
466 int NumberOfAvailableCores() {
467   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
468   // This number of cores should be greater than zero and never change.
469   DCHECK_GE(num_cores, 1);
470   DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
471   return num_cores;
472 }
473 
474 }  // namespace
475 
476 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
477   int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
478   if (!heap_->CanPromoteYoungAndExpandOldGeneration(
479           static_cast<size_t>(tasks * Page::kPageSize))) {
480     // Optimize for memory usage near the heap limit.
481     tasks = 1;
482   }
483   return tasks;
484 }
485 
486 MarkCompactCollector::MarkCompactCollector(Heap* heap)
487     : MarkCompactCollectorBase(heap),
488 #ifdef DEBUG
489       state_(IDLE),
490 #endif
491       is_shared_heap_(heap->IsShared()),
492       marking_state_(heap->isolate()),
493       non_atomic_marking_state_(heap->isolate()),
494       sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
495 }
496 
497 MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
498 
499 void MarkCompactCollector::SetUp() {
500   DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
501   DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
502   DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
503   DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
504 }
505 
506 void MarkCompactCollector::TearDown() {
507   AbortCompaction();
508   if (heap()->incremental_marking()->IsMarking()) {
509     local_marking_worklists()->Publish();
510     heap()->marking_barrier()->Publish();
511     // Marking barriers of LocalHeaps will be published in their destructors.
512     marking_worklists()->Clear();
513     local_weak_objects()->Publish();
514     weak_objects()->Clear();
515   }
516   sweeper()->TearDown();
517 }
518 
519 // static
520 bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
521   MapWord map_word = map.map_word(kRelaxedLoad);
522 
523   if (map_word.IsForwardingAddress()) {
524     return map_word.ToForwardingAddress().IsMap();
525   } else {
526     return map_word.ToMap().IsMap();
527   }
528 }
529 
530 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
531   DCHECK(!p->NeverEvacuate());
532 
533   if (FLAG_trace_evacuation_candidates) {
534     PrintIsolate(
535         isolate(),
536         "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
537         p->area_size() - p->allocated_bytes(), p->FreeListsLength());
538   }
539 
540   p->MarkEvacuationCandidate();
541   evacuation_candidates_.push_back(p);
542 }
543 
544 static void TraceFragmentation(PagedSpace* space) {
545   int number_of_pages = space->CountTotalPages();
546   intptr_t reserved = (number_of_pages * space->AreaSize());
547   intptr_t free = reserved - space->SizeOfObjects();
548   PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
549          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
550 }
551 
552 bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
553   DCHECK(!compacting_);
554   DCHECK(evacuation_candidates_.empty());
555 
556   // Bailouts for completely disabled compaction.
557   if (!FLAG_compact ||
558       (mode == StartCompactionMode::kAtomic && !heap()->IsGCWithoutStack() &&
559        !FLAG_compact_with_stack) ||
560       (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())) {
561     return false;
562   }
563 
564   CollectEvacuationCandidates(heap()->old_space());
565 
566   if (heap()->map_space() && FLAG_compact_maps) {
567     CollectEvacuationCandidates(heap()->map_space());
568   }
569 
570   if (FLAG_compact_code_space &&
571       (heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
572     CollectEvacuationCandidates(heap()->code_space());
573   } else if (FLAG_trace_fragmentation) {
574     TraceFragmentation(heap()->code_space());
575   }
576 
577   if (FLAG_trace_fragmentation && heap()->map_space()) {
578     TraceFragmentation(heap()->map_space());
579   }
580 
581   compacting_ = !evacuation_candidates_.empty();
582   return compacting_;
583 }
584 
585 void MarkCompactCollector::StartMarking() {
586   std::vector<Address> contexts =
587       heap()->memory_measurement()->StartProcessing();
588   if (FLAG_stress_per_context_marking_worklist) {
589     contexts.clear();
590     HandleScope handle_scope(heap()->isolate());
591     for (auto context : heap()->FindAllNativeContexts()) {
592       contexts.push_back(context->ptr());
593     }
594   }
595   code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
596   marking_worklists()->CreateContextWorklists(contexts);
597   auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
598   local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
599       marking_worklists(),
600       cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
601                : MarkingWorklists::Local::kNoCppMarkingState);
602   local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
603   marking_visitor_ = std::make_unique<MarkingVisitor>(
604       marking_state(), local_marking_worklists(), local_weak_objects_.get(),
605       heap_, epoch(), code_flush_mode(),
606       heap_->local_embedder_heap_tracer()->InUse(),
607       heap_->ShouldCurrentGCKeepAgesUnchanged());
608 // Marking bits are cleared by the sweeper.
609 #ifdef VERIFY_HEAP
610   if (FLAG_verify_heap) {
611     VerifyMarkbitsAreClean();
612   }
613 #endif  // VERIFY_HEAP
614 }
615 
616 void MarkCompactCollector::CollectGarbage() {
617   // Make sure that Prepare() has been called. The individual steps below will
618   // update the state as they proceed.
619   DCHECK(state_ == PREPARE_GC);
620 
621   MarkLiveObjects();
622   ClearNonLiveReferences();
623   VerifyMarking();
624   heap()->memory_measurement()->FinishProcessing(native_context_stats_);
625   RecordObjectStats();
626 
627   StartSweepSpaces();
628   Evacuate();
629   Finish();
630 }
631 
632 #ifdef VERIFY_HEAP
633 void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
634   ReadOnlyHeapObjectIterator iterator(space);
635   for (HeapObject object = iterator.Next(); !object.is_null();
636        object = iterator.Next()) {
637     CHECK(non_atomic_marking_state()->IsBlack(object));
638   }
639 }
640 
641 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
642   for (Page* p : *space) {
643     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
644     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
645   }
646 }
647 
648 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
649   if (!space) return;
650   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
651     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
652     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
653   }
654 }
655 
656 void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
657   if (!space) return;
658   LargeObjectSpaceObjectIterator it(space);
659   for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
660     CHECK(non_atomic_marking_state()->IsWhite(obj));
661     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
662                     MemoryChunk::FromHeapObject(obj)));
663   }
664 }
665 
666 void MarkCompactCollector::VerifyMarkbitsAreClean() {
667   VerifyMarkbitsAreClean(heap_->old_space());
668   VerifyMarkbitsAreClean(heap_->code_space());
669   if (heap_->map_space()) {
670     VerifyMarkbitsAreClean(heap_->map_space());
671   }
672   VerifyMarkbitsAreClean(heap_->new_space());
673   // Read-only space should always be black since we never collect any objects
674   // in it or linked from it.
675   VerifyMarkbitsAreDirty(heap_->read_only_space());
676   VerifyMarkbitsAreClean(heap_->lo_space());
677   VerifyMarkbitsAreClean(heap_->code_lo_space());
678   VerifyMarkbitsAreClean(heap_->new_lo_space());
679 }
680 
681 #endif  // VERIFY_HEAP
682 
683 void MarkCompactCollector::FinishSweepingIfOutOfWork() {
684   if (sweeper()->sweeping_in_progress() && FLAG_concurrent_sweeping &&
685       !sweeper()->AreSweeperTasksRunning()) {
686     // At this point we know that all concurrent sweeping tasks have run
687     // out of work and quit: all pages are swept. The main thread still needs
688     // to complete sweeping though.
689     EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
690   }
691   if (heap()->cpp_heap()) {
692     // Ensure that sweeping is also completed for the C++ managed heap, if one
693     // exists and it's out of work.
694     CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
695   }
696 }
697 
698 void MarkCompactCollector::EnsureSweepingCompleted(
699     SweepingForcedFinalizationMode mode) {
700   if (sweeper()->sweeping_in_progress()) {
701     TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
702                    ThreadKind::kMain);
703 
704     sweeper()->EnsureCompleted();
705     heap()->old_space()->RefillFreeList();
706     heap()->code_space()->RefillFreeList();
707     if (heap()->map_space()) {
708       heap()->map_space()->RefillFreeList();
709       heap()->map_space()->SortFreeList();
710     }
711 
712     heap()->tracer()->NotifySweepingCompleted();
713 
714 #ifdef VERIFY_HEAP
715     if (FLAG_verify_heap && !evacuation()) {
716       FullEvacuationVerifier verifier(heap());
717       verifier.Run();
718     }
719 #endif
720   }
721 
722   if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
723       heap()->cpp_heap()) {
724     // Ensure that sweeping is also completed for the C++ managed heap, if one
725     // exists.
726     CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
727     DCHECK(
728         !CppHeap::From(heap()->cpp_heap())->sweeper().IsSweepingInProgress());
729   }
730 
731   DCHECK_IMPLIES(mode == SweepingForcedFinalizationMode::kUnifiedHeap ||
732                      !heap()->cpp_heap(),
733                  !heap()->tracer()->IsSweepingInProgress());
734 }
735 
736 void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
737   sweeper()->EnsurePageIsSwept(page);
738 }
739 
740 void MarkCompactCollector::DrainSweepingWorklistForSpace(
741     AllocationSpace space) {
742   if (!sweeper()->sweeping_in_progress()) return;
743   sweeper()->DrainSweepingWorklistForSpace(space);
744 }
745 
746 void MarkCompactCollector::ComputeEvacuationHeuristics(
747     size_t area_size, int* target_fragmentation_percent,
748     size_t* max_evacuated_bytes) {
749   // For the memory-reducing and optimize-for-memory modes we directly define
750   // both constants.
751   const int kTargetFragmentationPercentForReduceMemory = 20;
752   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
753   const int kTargetFragmentationPercentForOptimizeMemory = 20;
754   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
755 
756   // For regular mode (which is latency critical) we define less aggressive
757   // defaults to start and switch to a trace-based (using compaction speed)
758   // approach as soon as we have enough samples.
759   const int kTargetFragmentationPercent = 70;
760   const size_t kMaxEvacuatedBytes = 4 * MB;
761   // Time to take for a single area (=payload of page). Used as soon as there
762   // exist enough compaction speed samples.
763   const float kTargetMsPerArea = .5;
764 
765   if (heap()->ShouldReduceMemory()) {
766     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
767     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
768   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
769     *target_fragmentation_percent =
770         kTargetFragmentationPercentForOptimizeMemory;
771     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
772   } else {
773     const double estimated_compaction_speed =
774         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
775     if (estimated_compaction_speed != 0) {
776       // Estimate the target fragmentation based on traced compaction speed
777       // and a goal for a single page.
778       const double estimated_ms_per_area =
779           1 + area_size / estimated_compaction_speed;
780       *target_fragmentation_percent = static_cast<int>(
781           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
782       if (*target_fragmentation_percent <
783           kTargetFragmentationPercentForReduceMemory) {
784         *target_fragmentation_percent =
785             kTargetFragmentationPercentForReduceMemory;
786       }
787     } else {
788       *target_fragmentation_percent = kTargetFragmentationPercent;
789     }
790     *max_evacuated_bytes = kMaxEvacuatedBytes;
791   }
792 }
793 
794 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
795   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
796          space->identity() == MAP_SPACE);
797 
798   int number_of_pages = space->CountTotalPages();
799   size_t area_size = space->AreaSize();
800 
801   const bool in_standard_path =
802       !(FLAG_manual_evacuation_candidates_selection ||
803         FLAG_stress_compaction_random || FLAG_stress_compaction ||
804         FLAG_compact_on_every_full_gc);
805   // Those variables will only be initialized if |in_standard_path|, and are not
806   // used otherwise.
807   size_t max_evacuated_bytes;
808   int target_fragmentation_percent;
809   size_t free_bytes_threshold;
810   if (in_standard_path) {
811     // We use two conditions to decide whether a page qualifies as an evacuation
812     // candidate, or not:
813     // * Target fragmentation: How fragmented a page is, i.e., the ratio
814     //   between live bytes and the capacity of this page (= area).
815     // * Evacuation quota: A global quota determining how many bytes should be
816     //   compacted.
817     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
818                                 &max_evacuated_bytes);
819     free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
820   }
821 
822   // Pairs of (live_bytes_in_page, page).
823   using LiveBytesPagePair = std::pair<size_t, Page*>;
824   std::vector<LiveBytesPagePair> pages;
825   pages.reserve(number_of_pages);
826 
827   DCHECK(!sweeping_in_progress());
828   Page* owner_of_linear_allocation_area =
829       space->top() == space->limit()
830           ? nullptr
831           : Page::FromAllocationAreaAddress(space->top());
832   for (Page* p : *space) {
833     if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
834         !p->CanAllocate())
835       continue;
836 
837     if (p->IsPinned()) {
838       DCHECK(
839           !p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING));
840       continue;
841     }
842 
843     // Invariant: Evacuation candidates are only created when marking is
844     // started. This means that sweeping has finished. Furthermore, at the end
845     // of a GC all evacuation candidates are cleared and their slot buffers are
846     // released.
847     CHECK(!p->IsEvacuationCandidate());
848     CHECK_NULL(p->slot_set<OLD_TO_OLD>());
849     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
850     CHECK(p->SweepingDone());
851     DCHECK(p->area_size() == area_size);
852     if (in_standard_path) {
853       // Only the pages with more than |free_bytes_threshold| free bytes are
854       // considered for evacuation.
855       if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
856         pages.push_back(std::make_pair(p->allocated_bytes(), p));
857       }
858     } else {
859       pages.push_back(std::make_pair(p->allocated_bytes(), p));
860     }
861 
862     // Unpin pages for the next GC
863     if (p->IsFlagSet(MemoryChunk::PINNED)) {
864       p->ClearFlag(MemoryChunk::PINNED);
865     }
866   }
867 
868   int candidate_count = 0;
869   size_t total_live_bytes = 0;
870 
871   const bool reduce_memory = heap()->ShouldReduceMemory();
872   if (FLAG_manual_evacuation_candidates_selection) {
873     for (size_t i = 0; i < pages.size(); i++) {
874       Page* p = pages[i].second;
875       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
876         candidate_count++;
877         total_live_bytes += pages[i].first;
878         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
879         AddEvacuationCandidate(p);
880       }
881     }
882   } else if (FLAG_stress_compaction_random) {
883     double fraction = isolate()->fuzzer_rng()->NextDouble();
884     size_t pages_to_mark_count =
885         static_cast<size_t>(fraction * (pages.size() + 1));
886     for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
887              pages.size(), pages_to_mark_count)) {
888       candidate_count++;
889       total_live_bytes += pages[i].first;
890       AddEvacuationCandidate(pages[i].second);
891     }
892   } else if (FLAG_stress_compaction) {
893     for (size_t i = 0; i < pages.size(); i++) {
894       Page* p = pages[i].second;
895       if (i % 2 == 0) {
896         candidate_count++;
897         total_live_bytes += pages[i].first;
898         AddEvacuationCandidate(p);
899       }
900     }
901   } else {
902     // The following approach determines the pages that should be evacuated.
903     //
904     // Sort pages from the most free to the least free, then select
905     // the first n pages for evacuation such that:
906     // - the total size of evacuated objects does not exceed the specified
907     // limit.
908     // - fragmentation of (n+1)-th page does not exceed the specified limit.
909     std::sort(pages.begin(), pages.end(),
910               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
911                 return a.first < b.first;
912               });
913     for (size_t i = 0; i < pages.size(); i++) {
914       size_t live_bytes = pages[i].first;
915       DCHECK_GE(area_size, live_bytes);
916       if (FLAG_compact_on_every_full_gc ||
917           ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
918         candidate_count++;
919         total_live_bytes += live_bytes;
920       }
921       if (FLAG_trace_fragmentation_verbose) {
922         PrintIsolate(isolate(),
923                      "compaction-selection-page: space=%s free_bytes_page=%zu "
924                      "fragmentation_limit_kb=%zu "
925                      "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
926                      "compaction_limit_kb=%zu\n",
927                      space->name(), (area_size - live_bytes) / KB,
928                      free_bytes_threshold / KB, target_fragmentation_percent,
929                      total_live_bytes / KB, max_evacuated_bytes / KB);
930       }
931     }
932     // How many pages we will allocate for the evacuated objects
933     // in the worst case: ceil(total_live_bytes / area_size)
934     int estimated_new_pages =
935         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
936     DCHECK_LE(estimated_new_pages, candidate_count);
937     int estimated_released_pages = candidate_count - estimated_new_pages;
938     // Avoid (compact -> expand) cycles.
939     if ((estimated_released_pages == 0) && !FLAG_compact_on_every_full_gc) {
940       candidate_count = 0;
941     }
942     for (int i = 0; i < candidate_count; i++) {
943       AddEvacuationCandidate(pages[i].second);
944     }
945   }
946 
947   if (FLAG_trace_fragmentation) {
948     PrintIsolate(isolate(),
949                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
950                  "total_live_bytes=%zu\n",
951                  space->name(), reduce_memory, candidate_count,
952                  total_live_bytes / KB);
953   }
954 }
955 
956 void MarkCompactCollector::AbortCompaction() {
957   if (compacting_) {
958     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
959     if (V8_EXTERNAL_CODE_SPACE_BOOL) {
960       RememberedSet<OLD_TO_CODE>::ClearAll(heap());
961     }
962     for (Page* p : evacuation_candidates_) {
963       p->ClearEvacuationCandidate();
964     }
965     compacting_ = false;
966     evacuation_candidates_.clear();
967   }
968   DCHECK(evacuation_candidates_.empty());
969 }
970 
971 void MarkCompactCollector::Prepare() {
972 #ifdef DEBUG
973   DCHECK(state_ == IDLE);
974   state_ = PREPARE_GC;
975 #endif
976 
977   DCHECK(!sweeping_in_progress());
978 
979   if (!heap()->incremental_marking()->IsMarking()) {
980     const auto embedder_flags = heap_->flags_for_embedder_tracer();
981     {
982       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
983       // PrepareForTrace should be called before visitor initialization in
984       // StartMarking.
985       heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
986     }
987     StartCompaction(StartCompactionMode::kAtomic);
988     StartMarking();
989     {
990       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
991       // TracePrologue immediately starts marking which requires V8 worklists to
992       // be set up.
993       heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
994     }
995   }
996 
997   heap_->FreeLinearAllocationAreas();
998 
999   PagedSpaceIterator spaces(heap());
1000   for (PagedSpace* space = spaces.Next(); space != nullptr;
1001        space = spaces.Next()) {
1002     space->PrepareForMarkCompact();
1003   }
1004 
1005   // All objects are guaranteed to be initialized in the atomic pause.
1006   if (heap()->new_lo_space()) {
1007     heap()->new_lo_space()->ResetPendingObject();
1008   }
1009 
1010   if (heap()->new_space()) {
1011     DCHECK_EQ(heap()->new_space()->top(),
1012               heap()->new_space()->original_top_acquire());
1013   }
1014 }
1015 
1016 void MarkCompactCollector::FinishConcurrentMarking() {
1017   // FinishConcurrentMarking is called for both concurrent and parallel
1018   // marking. It is safe to call this function when tasks are already finished.
1019   if (FLAG_parallel_marking || FLAG_concurrent_marking) {
1020     heap()->concurrent_marking()->Join();
1021     heap()->concurrent_marking()->FlushMemoryChunkData(
1022         non_atomic_marking_state());
1023     heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
1024   }
1025   if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
1026     cpp_heap->FinishConcurrentMarkingIfNeeded();
1027   }
1028 }
1029 
1030 void MarkCompactCollector::VerifyMarking() {
1031   CHECK(local_marking_worklists()->IsEmpty());
1032   DCHECK(heap_->incremental_marking()->IsStopped());
1033 #ifdef VERIFY_HEAP
1034   if (FLAG_verify_heap) {
1035     FullMarkingVerifier verifier(heap());
1036     verifier.Run();
1037   }
1038 #endif
1039 #ifdef VERIFY_HEAP
1040   if (FLAG_verify_heap) {
1041     heap()->old_space()->VerifyLiveBytes();
1042     if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
1043     heap()->code_space()->VerifyLiveBytes();
1044   }
1045 #endif
1046 }
1047 
1048 void MarkCompactCollector::Finish() {
1049   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
1050 
1051   SweepArrayBufferExtensions();
1052 
1053 #ifdef DEBUG
1054   heap()->VerifyCountersBeforeConcurrentSweeping();
1055 #endif
1056 
1057   marking_visitor_.reset();
1058   local_marking_worklists_.reset();
1059   marking_worklists_.ReleaseContextWorklists();
1060   native_context_stats_.Clear();
1061 
1062   CHECK(weak_objects_.current_ephemerons.IsEmpty());
1063   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1064   local_weak_objects_->next_ephemerons_local.Publish();
1065   local_weak_objects_.reset();
1066   weak_objects_.next_ephemerons.Clear();
1067 
1068   sweeper()->StartSweeperTasks();
1069   sweeper()->StartIterabilityTasks();
1070 
1071   // Clear the marking state of live large objects.
1072   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
1073   heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
1074 
1075 #ifdef DEBUG
1076   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
1077   state_ = IDLE;
1078 #endif
1079   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
1080 
1081   // The stub caches are not traversed during GC; clear them to force
1082   // their lazy re-initialization. This must be done after the
1083   // GC, because it relies on the new address of certain old space
1084   // objects (empty string, illegal builtin).
1085   isolate()->load_stub_cache()->Clear();
1086   isolate()->store_stub_cache()->Clear();
1087 
1088   if (have_code_to_deoptimize_) {
1089     // Some code objects were marked for deoptimization during the GC.
1090     Deoptimizer::DeoptimizeMarkedCode(isolate());
1091     have_code_to_deoptimize_ = false;
1092   }
1093 }
1094 
1095 void MarkCompactCollector::SweepArrayBufferExtensions() {
1096   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
1097   heap_->array_buffer_sweeper()->RequestSweep(
1098       ArrayBufferSweeper::SweepingType::kFull);
1099 }
1100 
1101 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
1102  public:
1103   explicit RootMarkingVisitor(MarkCompactCollector* collector)
1104       : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}
1105 
1106   void VisitRootPointer(Root root, const char* description,
1107                         FullObjectSlot p) final {
1108     DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
1109     MarkObjectByPointer(root, p);
1110   }
1111 
1112   void VisitRootPointers(Root root, const char* description,
1113                          FullObjectSlot start, FullObjectSlot end) final {
1114     for (FullObjectSlot p = start; p < end; ++p) {
1115       MarkObjectByPointer(root, p);
1116     }
1117   }
1118 
1119   void VisitRunningCode(FullObjectSlot p) final {
1120     Code code = Code::cast(*p);
1121 
1122     // If Code is currently executing, then we must not remove its
1123     // deoptimization literals, which it might need in order to successfully
1124     // deoptimize.
1125     //
1126     // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
1127     // that heap snapshots accurately describe the roots.
1128     if (code.kind() != CodeKind::BASELINE) {
1129       DeoptimizationData deopt_data =
1130           DeoptimizationData::cast(code.deoptimization_data());
1131       if (deopt_data.length() > 0) {
1132         DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
1133         int literals_length = literals.length();
1134         for (int i = 0; i < literals_length; ++i) {
1135           MaybeObject maybe_literal = literals.Get(i);
1136           HeapObject heap_literal;
1137           if (maybe_literal.GetHeapObject(&heap_literal)) {
1138             MarkObjectByPointer(Root::kStackRoots,
1139                                 FullObjectSlot(&heap_literal));
1140           }
1141         }
1142       }
1143     }
1144 
1145     // And then mark the Code itself.
1146     VisitRootPointer(Root::kStackRoots, nullptr, p);
1147   }
1148 
1149  private:
1150   V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
1151     Object object = *p;
1152     if (!object.IsHeapObject()) return;
1153     HeapObject heap_object = HeapObject::cast(object);
1154     BasicMemoryChunk* target_page =
1155         BasicMemoryChunk::FromHeapObject(heap_object);
1156     if (is_shared_heap_ != target_page->InSharedHeap()) return;
1157     collector_->MarkRootObject(root, heap_object);
1158   }
1159 
1160   MarkCompactCollector* const collector_;
1161   const bool is_shared_heap_;
1162 };
1163 
1164 // This visitor is used to visit the body of special objects held alive by
1165 // other roots.
1166 //
1167 // It is currently used for
1168 // - Code held alive by the top optimized frame. This code cannot be deoptimized
1169 // and thus has to be kept alive in an isolated way, i.e., it should not keep
1170 // alive other code objects reachable through the weak list, but it should
1171 // keep alive its embedded pointers (which would otherwise be dropped).
1172 // - Prefix of the string table.
1173 class MarkCompactCollector::CustomRootBodyMarkingVisitor final
1174     : public ObjectVisitorWithCageBases {
1175  public:
1176   explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
1177       : ObjectVisitorWithCageBases(collector->isolate()),
1178         collector_(collector) {}
1179 
1180   void VisitPointer(HeapObject host, ObjectSlot p) final {
1181     MarkObject(host, p.load(cage_base()));
1182   }
1183 
1184   void VisitMapPointer(HeapObject host) final {
1185     MarkObject(host, host.map(cage_base()));
1186   }
1187 
1188   void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
1189     for (ObjectSlot p = start; p < end; ++p) {
1190       // The map slot should be handled in VisitMapPointer.
1191       DCHECK_NE(host.map_slot(), p);
1192       DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
1193       MarkObject(host, p.load(cage_base()));
1194     }
1195   }
1196 
1197   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
1198     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
1199     MarkObject(host, slot.load(code_cage_base()));
1200   }
1201 
1202   void VisitPointers(HeapObject host, MaybeObjectSlot start,
1203                      MaybeObjectSlot end) final {
1204     // At the moment, custom roots cannot contain weak pointers.
1205     UNREACHABLE();
1206   }
1207 
1208   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1209     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1210     MarkObject(host, target);
1211   }
1212 
1213   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1214     MarkObject(host, rinfo->target_object(cage_base()));
1215   }
1216 
1217  private:
1218   V8_INLINE void MarkObject(HeapObject host, Object object) {
1219     if (!object.IsHeapObject()) return;
1220     HeapObject heap_object = HeapObject::cast(object);
1221     // We use this visitor both in client and shared GCs. The client GC should
1222     // not mark objects in the shared heap. In shared GCs we are marking each
1223     // client's top stack frame, so it is actually legal to encounter references
1224     // into the client heap here in a shared GC. We need to bail out in these
1225     // cases as well.
1226     if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
1227     collector_->MarkObject(host, heap_object);
1228   }
1229 
1230   MarkCompactCollector* const collector_;
1231 };
1232 
1233 class MarkCompactCollector::SharedHeapObjectVisitor final
1234     : public ObjectVisitorWithCageBases {
1235  public:
1236   explicit SharedHeapObjectVisitor(MarkCompactCollector* collector)
1237       : ObjectVisitorWithCageBases(collector->isolate()),
1238         collector_(collector) {}
1239 
1240   void VisitPointer(HeapObject host, ObjectSlot p) final {
1241     MarkObject(host, p, p.load(cage_base()));
1242   }
1243 
1244   void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1245     MaybeObject object = p.load(cage_base());
1246     HeapObject heap_object;
1247     if (object.GetHeapObject(&heap_object))
1248       MarkObject(host, ObjectSlot(p), heap_object);
1249   }
1250 
1251   void VisitMapPointer(HeapObject host) final {
1252     MarkObject(host, host.map_slot(), host.map(cage_base()));
1253   }
1254 
1255   void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
1256     for (ObjectSlot p = start; p < end; ++p) {
1257       // The map slot should be handled in VisitMapPointer.
1258       DCHECK_NE(host.map_slot(), p);
1259       DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
1260       MarkObject(host, p, p.load(cage_base()));
1261     }
1262   }
1263 
1264   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
1265     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
1266     MarkObject(host, ObjectSlot(slot.address()), slot.load(code_cage_base()));
1267   }
1268 
1269   void VisitPointers(HeapObject host, MaybeObjectSlot start,
1270                      MaybeObjectSlot end) final {
1271     for (MaybeObjectSlot p = start; p < end; ++p) {
1272       // The map slot should be handled in VisitMapPointer.
1273       DCHECK_NE(host.map_slot(), ObjectSlot(p));
1274       VisitPointer(host, p);
1275     }
1276   }
1277 
1278   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1279     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1280     RecordRelocSlot(host, rinfo, target);
1281   }
1282 
1283   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1284     HeapObject target = rinfo->target_object(cage_base());
1285     RecordRelocSlot(host, rinfo, target);
1286   }
1287 
1288  private:
1289   V8_INLINE void MarkObject(HeapObject host, ObjectSlot slot, Object object) {
1290     DCHECK(!host.InSharedHeap());
1291     if (!object.IsHeapObject()) return;
1292     HeapObject heap_object = HeapObject::cast(object);
1293     if (!heap_object.InSharedHeap()) return;
1294     RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
1295         MemoryChunk::FromHeapObject(host), slot.address());
1296     collector_->MarkRootObject(Root::kClientHeap, heap_object);
1297   }
1298 
1299   V8_INLINE void RecordRelocSlot(Code host, RelocInfo* rinfo,
1300                                  HeapObject target) {
1301     if (ShouldRecordRelocSlot(host, rinfo, target)) {
1302       RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
1303       RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk,
1304                                                 info.slot_type, info.offset);
1305     }
1306   }
1307 
1308   V8_INLINE bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
1309                                        HeapObject target) {
1310     return BasicMemoryChunk::FromHeapObject(target)->InSharedHeap();
1311   }
1312 
1313   MarkCompactCollector* const collector_;
1314 };
1315 
1316 class InternalizedStringTableCleaner : public RootVisitor {
1317  public:
1318   explicit InternalizedStringTableCleaner(Heap* heap)
1319       : heap_(heap), pointers_removed_(0) {}
1320 
1321   void VisitRootPointers(Root root, const char* description,
1322                          FullObjectSlot start, FullObjectSlot end) override {
1323     UNREACHABLE();
1324   }
1325 
1326   void VisitRootPointers(Root root, const char* description,
1327                          OffHeapObjectSlot start,
1328                          OffHeapObjectSlot end) override {
1329     DCHECK_EQ(root, Root::kStringTable);
1330     // Visit all HeapObject pointers in [start, end).
1331     MarkCompactCollector::NonAtomicMarkingState* marking_state =
1332         heap_->mark_compact_collector()->non_atomic_marking_state();
1333     Isolate* isolate = heap_->isolate();
1334     for (OffHeapObjectSlot p = start; p < end; ++p) {
1335       Object o = p.load(isolate);
1336       if (o.IsHeapObject()) {
1337         HeapObject heap_object = HeapObject::cast(o);
1338         DCHECK(!Heap::InYoungGeneration(heap_object));
1339         if (marking_state->IsWhite(heap_object)) {
1340           pointers_removed_++;
1341           // Set the entry to deleted_element() (as deleted).
1342           p.store(StringTable::deleted_element());
1343         }
1344       }
1345     }
1346   }
1347 
1348   int PointersRemoved() { return pointers_removed_; }
1349 
1350  private:
1351   Heap* heap_;
1352   int pointers_removed_;
1353 };
1354 
1355 class ExternalStringTableCleaner : public RootVisitor {
1356  public:
1357   explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1358 
1359   void VisitRootPointers(Root root, const char* description,
1360                          FullObjectSlot start, FullObjectSlot end) override {
1361     // Visit all HeapObject pointers in [start, end).
1362     MarkCompactCollector::NonAtomicMarkingState* marking_state =
1363         heap_->mark_compact_collector()->non_atomic_marking_state();
1364     Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
1365     for (FullObjectSlot p = start; p < end; ++p) {
1366       Object o = *p;
1367       if (o.IsHeapObject()) {
1368         HeapObject heap_object = HeapObject::cast(o);
1369         if (marking_state->IsWhite(heap_object)) {
1370           if (o.IsExternalString()) {
1371             heap_->FinalizeExternalString(String::cast(o));
1372           } else {
1373             // The original external string may have been internalized.
1374             DCHECK(o.IsThinString());
1375           }
1376           // Set the entry to the_hole_value (as deleted).
1377           p.store(the_hole);
1378         }
1379       }
1380     }
1381   }
1382 
1383  private:
1384   Heap* heap_;
1385 };
1386 
1387 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1388 // are retained.
1389 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1390  public:
1391   explicit MarkCompactWeakObjectRetainer(
1392       MarkCompactCollector::NonAtomicMarkingState* marking_state)
1393       : marking_state_(marking_state) {}
1394 
1395   Object RetainAs(Object object) override {
1396     HeapObject heap_object = HeapObject::cast(object);
1397     DCHECK(!marking_state_->IsGrey(heap_object));
1398     if (marking_state_->IsBlack(heap_object)) {
1399       return object;
1400     } else if (object.IsAllocationSite() &&
1401                !(AllocationSite::cast(object).IsZombie())) {
1402       // "dead" AllocationSites need to live long enough for a traversal of new
1403       // space. These sites get a one-time reprieve.
1404 
1405       Object nested = object;
1406       while (nested.IsAllocationSite()) {
1407         AllocationSite current_site = AllocationSite::cast(nested);
1408         // MarkZombie will overwrite the nested_site, so read it before
1409         // marking.
1410         nested = current_site.nested_site();
1411         current_site.MarkZombie();
1412         marking_state_->WhiteToBlack(current_site);
1413       }
1414 
1415       return object;
1416     } else {
1417       return Object();
1418     }
1419   }
1420 
1421  private:
1422   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1423 };
1424 
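// Visitor that re-records the slots of an already-migrated object: references
// into the young generation go into the OLD_TO_NEW remembered set, references
// to evacuation candidates into OLD_TO_OLD (or OLD_TO_CODE for executable
// pages when the external code space is enabled). Ephemeron keys that point
// into the young generation are tracked in the EphemeronRememberedSet instead
// of being recorded as ordinary slots.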
1425 class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
1426  public:
1427   explicit RecordMigratedSlotVisitor(
1428       MarkCompactCollector* collector,
1429       EphemeronRememberedSet* ephemeron_remembered_set)
1430       : ObjectVisitorWithCageBases(collector->isolate()),
1431         collector_(collector),
1432         ephemeron_remembered_set_(ephemeron_remembered_set) {}
1433 
1434   inline void VisitPointer(HeapObject host, ObjectSlot p) final {
1435     DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
1436     RecordMigratedSlot(host, MaybeObject::FromObject(p.load(cage_base())),
1437                        p.address());
1438   }
1439 
1440   inline void VisitMapPointer(HeapObject host) final {
1441     VisitPointer(host, host.map_slot());
1442   }
1443 
1444   inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1445     DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
1446     RecordMigratedSlot(host, p.load(cage_base()), p.address());
1447   }
1448 
1449   inline void VisitPointers(HeapObject host, ObjectSlot start,
1450                             ObjectSlot end) final {
1451     while (start < end) {
1452       VisitPointer(host, start);
1453       ++start;
1454     }
1455   }
1456 
1457   inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
1458                             MaybeObjectSlot end) final {
1459     while (start < end) {
1460       VisitPointer(host, start);
1461       ++start;
1462     }
1463   }
1464 
1465   inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
1466     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
1467     // This code is similar to the implementation of VisitPointer(), modulo
1468     // the different kind of slot.
1469     DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
1470     Object code = slot.load(code_cage_base());
1471     RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
1472   }
1473 
1474   inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
1475                              ObjectSlot value) override {
1476     DCHECK(host.IsEphemeronHashTable());
1477     DCHECK(!Heap::InYoungGeneration(host));
1478 
1479     VisitPointer(host, value);
1480 
1481     if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
1482       auto table = EphemeronHashTable::unchecked_cast(host);
1483       auto insert_result =
1484           ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
1485       insert_result.first->second.insert(index);
1486     } else {
1487       VisitPointer(host, key);
1488     }
1489   }
1490 
1491   inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1492     DCHECK_EQ(host, rinfo->host());
1493     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1494     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1495     // The target is always in old space, so we don't have to record the
1496     // slot in the old-to-new remembered set.
1497     DCHECK(!Heap::InYoungGeneration(target));
1498     collector_->RecordRelocSlot(host, rinfo, target);
1499   }
1500 
1501   inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1502     DCHECK_EQ(host, rinfo->host());
1503     DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
1504     HeapObject object = rinfo->target_object(cage_base());
1505     GenerationalBarrierForCode(host, rinfo, object);
1506     collector_->RecordRelocSlot(host, rinfo, object);
1507   }
1508 
1509   // Entries that are skipped for recording.
1510   inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1511   inline void VisitExternalReference(Foreign host, Address* p) final {}
1512   inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1513   inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1514 
1515   virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
1516 
1517  protected:
1518   inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
1519                                          Address slot) {
1520     if (value->IsStrongOrWeak()) {
1521       BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
1522       if (p->InYoungGeneration()) {
1523         DCHECK_IMPLIES(
1524             p->IsToPage(),
1525             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
1526 
1527         MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
1528         DCHECK(chunk->SweepingDone());
1529         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
1530       } else if (p->IsEvacuationCandidate()) {
1531         if (V8_EXTERNAL_CODE_SPACE_BOOL &&
1532             p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1533           RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
1534               MemoryChunk::FromHeapObject(host), slot);
1535         } else {
1536           RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1537               MemoryChunk::FromHeapObject(host), slot);
1538         }
1539       }
1540     }
1541   }
1542 
1543   MarkCompactCollector* collector_;
1544   EphemeronRememberedSet* ephemeron_remembered_set_;
1545 };
1546 
1547 class MigrationObserver {
1548  public:
1549   explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1550 
1551   virtual ~MigrationObserver() = default;
1552   virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1553                     int size) = 0;
1554 
1555  protected:
1556   Heap* heap_;
1557 };
1558 
1559 class ProfilingMigrationObserver final : public MigrationObserver {
1560  public:
1561   explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1562 
1563   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1564                    int size) final {
1565     if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
1566       PROFILE(heap_->isolate(),
1567               CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1568     }
1569     heap_->OnMoveEvent(dst, src, size);
1570   }
1571 };
1572 
1573 class HeapObjectVisitor {
1574  public:
1575   virtual ~HeapObjectVisitor() = default;
1576   virtual bool Visit(HeapObject object, int size) = 0;
1577 };
1578 
1579 class EvacuateVisitorBase : public HeapObjectVisitor {
1580  public:
1581   void AddObserver(MigrationObserver* observer) {
1582     migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1583     observers_.push_back(observer);
1584   }
1585 
1586  protected:
1587   enum MigrationMode { kFast, kObserved };
1588 
1589   PtrComprCageBase cage_base() {
1590 #if V8_COMPRESS_POINTERS
1591     return PtrComprCageBase{heap_->isolate()};
1592 #else
1593     return PtrComprCageBase{};
1594 #endif  // V8_COMPRESS_POINTERS
1595   }
1596 
1597   using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
1598                                    HeapObject src, int size,
1599                                    AllocationSpace dest);
1600 
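// Copies |src| to |dst| in |dest| space, notifies the registered migration
// observers (in kObserved mode), re-records the slots of the copy via
// |record_visitor_|, and finally installs a forwarding pointer into the map
// word of |src|.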
1601   template <MigrationMode mode>
1602   static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
1603                                HeapObject src, int size, AllocationSpace dest) {
1604     Address dst_addr = dst.address();
1605     Address src_addr = src.address();
1606     PtrComprCageBase cage_base = base->cage_base();
1607     DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
1608     DCHECK_NE(dest, LO_SPACE);
1609     DCHECK_NE(dest, CODE_LO_SPACE);
1610     if (dest == OLD_SPACE) {
1611       DCHECK_OBJECT_SIZE(size);
1612       DCHECK(IsAligned(size, kTaggedSize));
1613       base->heap_->CopyBlock(dst_addr, src_addr, size);
1614       if (mode != MigrationMode::kFast)
1615         base->ExecuteMigrationObservers(dest, src, dst, size);
1616       // In case the object's map gets relocated during GC we load the old map
1617       // here. This is fine since they store the same content.
1618       dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1619       if (V8_UNLIKELY(FLAG_minor_mc)) {
1620         base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
1621       }
1622     } else if (dest == MAP_SPACE) {
1623       DCHECK_OBJECT_SIZE(size);
1624       DCHECK(IsAligned(size, kTaggedSize));
1625       base->heap_->CopyBlock(dst_addr, src_addr, size);
1626       if (mode != MigrationMode::kFast)
1627         base->ExecuteMigrationObservers(dest, src, dst, size);
1628       dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1629     } else if (dest == CODE_SPACE) {
1630       DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1631       base->heap_->CopyBlock(dst_addr, src_addr, size);
1632       Code code = Code::cast(dst);
1633       code.Relocate(dst_addr - src_addr);
1634       if (mode != MigrationMode::kFast)
1635         base->ExecuteMigrationObservers(dest, src, dst, size);
1636       // In case the object's map gets relocated during GC we load the old map
1637       // here. This is fine since they store the same content.
1638       dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1639     } else {
1640       DCHECK_OBJECT_SIZE(size);
1641       DCHECK(dest == NEW_SPACE);
1642       base->heap_->CopyBlock(dst_addr, src_addr, size);
1643       if (mode != MigrationMode::kFast)
1644         base->ExecuteMigrationObservers(dest, src, dst, size);
1645     }
1646     src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
1647   }
1648 
1649   EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
1650                       ConcurrentAllocator* shared_old_allocator,
1651                       RecordMigratedSlotVisitor* record_visitor)
1652       : heap_(heap),
1653         local_allocator_(local_allocator),
1654         shared_old_allocator_(shared_old_allocator),
1655         record_visitor_(record_visitor),
1656         shared_string_table_(shared_old_allocator != nullptr) {
1657     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1658   }
1659 
1660   inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
1661                                 int size, HeapObject* target_object) {
1662 #ifdef DEBUG
1663     if (FLAG_stress_compaction && AbortCompactionForTesting(object))
1664       return false;
1665 #endif  // DEBUG
1666     Map map = object.map(cage_base());
1667     AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
1668     AllocationResult allocation;
1669     if (ShouldPromoteIntoSharedHeap(map)) {
1670       DCHECK_EQ(target_space, OLD_SPACE);
1671       DCHECK(Heap::InYoungGeneration(object));
1672       DCHECK_NOT_NULL(shared_old_allocator_);
1673       allocation = shared_old_allocator_->AllocateRaw(size, alignment,
1674                                                       AllocationOrigin::kGC);
1675     } else {
1676       allocation = local_allocator_->Allocate(target_space, size,
1677                                               AllocationOrigin::kGC, alignment);
1678     }
1679     if (allocation.To(target_object)) {
1680       MigrateObject(*target_object, object, size, target_space);
1681       if (target_space == CODE_SPACE)
1682         MemoryChunk::FromHeapObject(*target_object)
1683             ->GetCodeObjectRegistry()
1684             ->RegisterNewlyAllocatedCodeObject((*target_object).address());
1685       return true;
1686     }
1687     return false;
1688   }
1689 
1690   inline bool ShouldPromoteIntoSharedHeap(Map map) {
1691     if (shared_string_table_) {
1692       return String::IsInPlaceInternalizableExcludingExternal(
1693           map.instance_type());
1694     }
1695     return false;
1696   }
1697 
1698   inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
1699                                         HeapObject dst, int size) {
1700     for (MigrationObserver* obs : observers_) {
1701       obs->Move(dest, src, dst, size);
1702     }
1703   }
1704 
1705   inline void MigrateObject(HeapObject dst, HeapObject src, int size,
1706                             AllocationSpace dest) {
1707     migration_function_(this, dst, src, size, dest);
1708   }
1709 
1710 #ifdef DEBUG
1711   bool AbortCompactionForTesting(HeapObject object) {
1712     if (FLAG_stress_compaction) {
1713       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1714                              kPageAlignmentMask & ~kObjectAlignmentMask;
1715       if ((object.ptr() & kPageAlignmentMask) == mask) {
1716         Page* page = Page::FromHeapObject(object);
1717         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1718           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1719         } else {
1720           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1721           return true;
1722         }
1723       }
1724     }
1725     return false;
1726   }
1727 #endif  // DEBUG
1728 
1729   Heap* heap_;
1730   EvacuationAllocator* local_allocator_;
1731   ConcurrentAllocator* shared_old_allocator_;
1732   RecordMigratedSlotVisitor* record_visitor_;
1733   std::vector<MigrationObserver*> observers_;
1734   MigrateFunction migration_function_;
1735   bool shared_string_table_ = false;
1736 };
1737 
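// Evacuates live objects of the young generation: objects are either promoted
// into the old space (always if |always_promote_young| is kYes, otherwise only
// if the heap decides they should be promoted) or copied within the new space,
// falling back to the old space if the new-space allocation fails. Pretenuring
// feedback is collected along the way, and thin strings may be shortcut
// without copying.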
1738 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1739  public:
1740   explicit EvacuateNewSpaceVisitor(
1741       Heap* heap, EvacuationAllocator* local_allocator,
1742       ConcurrentAllocator* shared_old_allocator,
1743       RecordMigratedSlotVisitor* record_visitor,
1744       Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
1745       AlwaysPromoteYoung always_promote_young)
1746       : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
1747                             record_visitor),
1748         buffer_(LocalAllocationBuffer::InvalidBuffer()),
1749         promoted_size_(0),
1750         semispace_copied_size_(0),
1751         local_pretenuring_feedback_(local_pretenuring_feedback),
1752         is_incremental_marking_(heap->incremental_marking()->IsMarking()),
1753         always_promote_young_(always_promote_young) {}
1754 
1755   inline bool Visit(HeapObject object, int size) override {
1756     if (TryEvacuateWithoutCopy(object)) return true;
1757     HeapObject target_object;
1758 
1759     if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
1760       heap_->UpdateAllocationSite(object.map(), object,
1761                                   local_pretenuring_feedback_);
1762 
1763       if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1764         heap_->FatalProcessOutOfMemory(
1765             "MarkCompactCollector: young object promotion failed");
1766       }
1767 
1768       promoted_size_ += size;
1769       return true;
1770     }
1771 
1772     if (heap_->ShouldBePromoted(object.address()) &&
1773         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1774       promoted_size_ += size;
1775       return true;
1776     }
1777 
1778     heap_->UpdateAllocationSite(object.map(), object,
1779                                 local_pretenuring_feedback_);
1780 
1781     HeapObject target;
1782     AllocationSpace space = AllocateTargetObject(object, size, &target);
1783     MigrateObject(HeapObject::cast(target), object, size, space);
1784     semispace_copied_size_ += size;
1785     return true;
1786   }
1787 
1788   intptr_t promoted_size() { return promoted_size_; }
1789   intptr_t semispace_copied_size() { return semispace_copied_size_; }
1790 
1791  private:
1792   inline bool TryEvacuateWithoutCopy(HeapObject object) {
1793     if (is_incremental_marking_) return false;
1794 
1795     Map map = object.map();
1796 
1797     // Some objects can be evacuated without creating a copy.
1798     if (map.visitor_id() == kVisitThinString) {
1799       HeapObject actual = ThinString::cast(object).unchecked_actual();
1800       if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1801       object.set_map_word(MapWord::FromForwardingAddress(actual),
1802                           kRelaxedStore);
1803       return true;
1804     }
1805     // TODO(mlippautz): Handle ConsString.
1806 
1807     return false;
1808   }
1809 
1810   inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
1811                                               HeapObject* target_object) {
1812     AllocationAlignment alignment =
1813         HeapObject::RequiredAlignment(old_object.map());
1814     AllocationSpace space_allocated_in = NEW_SPACE;
1815     AllocationResult allocation = local_allocator_->Allocate(
1816         NEW_SPACE, size, AllocationOrigin::kGC, alignment);
1817     if (allocation.IsFailure()) {
1818       allocation = AllocateInOldSpace(size, alignment);
1819       space_allocated_in = OLD_SPACE;
1820     }
1821     bool ok = allocation.To(target_object);
1822     DCHECK(ok);
1823     USE(ok);
1824     return space_allocated_in;
1825   }
1826 
1827   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1828                                              AllocationAlignment alignment) {
1829     AllocationResult allocation = local_allocator_->Allocate(
1830         OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
1831     if (allocation.IsFailure()) {
1832       heap_->FatalProcessOutOfMemory(
1833           "MarkCompactCollector: semi-space copy, fallback in old gen");
1834     }
1835     return allocation;
1836   }
1837 
1838   LocalAllocationBuffer buffer_;
1839   intptr_t promoted_size_;
1840   intptr_t semispace_copied_size_;
1841   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1842   bool is_incremental_marking_;
1843   AlwaysPromoteYoung always_promote_young_;
1844 };
1845 
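// Processes whole new-space pages that are moved instead of being evacuated
// object by object. In NEW_TO_OLD mode the page is converted into an old-space
// page and its objects are re-visited to record slots; in NEW_TO_NEW mode the
// page only moves between semispaces and pretenuring feedback is updated.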
1846 template <PageEvacuationMode mode>
1847 class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1848  public:
1849   explicit EvacuateNewSpacePageVisitor(
1850       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1851       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1852       : heap_(heap),
1853         record_visitor_(record_visitor),
1854         moved_bytes_(0),
1855         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1856 
1857   static void Move(Page* page) {
1858     switch (mode) {
1859       case NEW_TO_NEW:
1860         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1861         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1862         break;
1863       case NEW_TO_OLD: {
1864         page->heap()->new_space()->from_space().RemovePage(page);
1865         Page* new_page = Page::ConvertNewToOld(page);
1866         DCHECK(!new_page->InYoungGeneration());
1867         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1868         break;
1869       }
1870     }
1871   }
1872 
1873   inline bool Visit(HeapObject object, int size) override {
1874     if (mode == NEW_TO_NEW) {
1875       heap_->UpdateAllocationSite(object.map(), object,
1876                                   local_pretenuring_feedback_);
1877     } else if (mode == NEW_TO_OLD) {
1878       DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
1879       PtrComprCageBase cage_base = GetPtrComprCageBase(object);
1880       object.IterateFast(cage_base, record_visitor_);
1881       if (V8_UNLIKELY(FLAG_minor_mc)) {
1882         record_visitor_->MarkArrayBufferExtensionPromoted(object);
1883       }
1884     }
1885     return true;
1886   }
1887 
1888   intptr_t moved_bytes() { return moved_bytes_; }
1889   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1890 
1891  private:
1892   Heap* heap_;
1893   RecordMigratedSlotVisitor* record_visitor_;
1894   intptr_t moved_bytes_;
1895   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1896 };
1897 
1898 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1899  public:
1900   EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
1901                           ConcurrentAllocator* shared_old_allocator,
1902                           RecordMigratedSlotVisitor* record_visitor)
1903       : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
1904                             record_visitor) {}
1905 
1906   inline bool Visit(HeapObject object, int size) override {
1907     HeapObject target_object;
1908     if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
1909                           object, size, &target_object)) {
1910       DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
1911                  .IsForwardingAddress());
1912       return true;
1913     }
1914     return false;
1915   }
1916 };
1917 
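// Visitor that does not move objects; it only re-records their slots using a
// RecordMigratedSlotVisitor and the precomputed object size.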
1918 class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1919  public:
1920   explicit EvacuateRecordOnlyVisitor(Heap* heap)
1921       : heap_(heap)
1922 #ifdef V8_COMPRESS_POINTERS
1923         ,
1924         cage_base_(heap->isolate())
1925 #endif  // V8_COMPRESS_POINTERS
1926   {
1927   }
1928 
1929   // The pointer compression cage base value used for decompression of all
1930   // tagged values except references to Code objects.
1931   V8_INLINE PtrComprCageBase cage_base() const {
1932 #ifdef V8_COMPRESS_POINTERS
1933     return cage_base_;
1934 #else
1935     return PtrComprCageBase{};
1936 #endif  // V8_COMPRESS_POINTERS
1937   }
1938 
1939   inline bool Visit(HeapObject object, int size) override {
1940     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
1941                                       &heap_->ephemeron_remembered_set_);
1942     Map map = object.map(cage_base());
1943     // Instead of calling object.IterateBodyFast(cage_base(), &visitor) here
1944     // we can shortcut and use the precomputed size value passed to the visitor.
1945     DCHECK_EQ(object.SizeFromMap(map), size);
1946     object.IterateBodyFast(map, size, &visitor);
1947     return true;
1948   }
1949 
1950  private:
1951   Heap* heap_;
1952 #ifdef V8_COMPRESS_POINTERS
1953   const PtrComprCageBase cage_base_;
1954 #endif  // V8_COMPRESS_POINTERS
1955 };
1956 
1957 bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
1958   Object o = *p;
1959   if (!o.IsHeapObject()) return false;
1960   HeapObject heap_object = HeapObject::cast(o);
1961   return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1962       heap_object);
1963 }
1964 
1965 void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1966                                      ObjectVisitor* custom_root_body_visitor) {
1967   // Mark the heap roots including global variables, stack variables,
1968   // etc., and all objects reachable from them.
1969   heap()->IterateRootsIncludingClients(
1970       root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
1971 
1972   // Custom marking for top optimized frame.
1973   ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
1974 
1975   if (isolate()->is_shared()) {
1976     isolate()->global_safepoint()->IterateClientIsolates(
1977         [this, custom_root_body_visitor](Isolate* client) {
1978           ProcessTopOptimizedFrame(custom_root_body_visitor, client);
1979         });
1980   }
1981 }
1982 
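// Only the shared isolate scans its client heaps here: every object of every
// client heap is visited so that references into the shared heap get marked
// and recorded in the OLD_TO_SHARED remembered set.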
1983 void MarkCompactCollector::MarkObjectsFromClientHeaps() {
1984   if (!isolate()->is_shared()) return;
1985 
1986   SharedHeapObjectVisitor visitor(this);
1987 
1988   isolate()->global_safepoint()->IterateClientIsolates(
1989       [&visitor](Isolate* client) {
1990         Heap* heap = client->heap();
1991         HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
1992         PtrComprCageBase cage_base(client);
1993         for (HeapObject obj = iterator.Next(); !obj.is_null();
1994              obj = iterator.Next()) {
1995           obj.IterateFast(cage_base, &visitor);
1996         }
1997       });
1998 }
1999 
2000 void MarkCompactCollector::VisitObject(HeapObject obj) {
2001   marking_visitor_->Visit(obj.map(), obj);
2002 }
2003 
2004 void MarkCompactCollector::RevisitObject(HeapObject obj) {
2005   DCHECK(marking_state()->IsBlack(obj));
2006   DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
2007                  0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
2008   MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
2009   marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
2010 }
2011 
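// Interleaves ephemeron processing with draining the marking worklists until
// no further objects get marked. Each round swaps next_ephemerons into
// current_ephemerons, processes them, and lets wrapper tracing and concurrent
// marking catch up. Returns false if FLAG_ephemeron_fixpoint_iterations rounds
// were not enough, in which case the caller switches to the linear algorithm.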
2012 bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
2013   int iterations = 0;
2014   int max_iterations = FLAG_ephemeron_fixpoint_iterations;
2015 
2016   bool another_ephemeron_iteration_main_thread;
2017 
2018   do {
2019     PerformWrapperTracing();
2020 
2021     if (iterations >= max_iterations) {
2022       // Give up fixpoint iteration and switch to the linear algorithm.
2023       return false;
2024     }
2025 
2026     // Move ephemerons from next_ephemerons into current_ephemerons to
2027     // drain them in this iteration.
2028     DCHECK(
2029         local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2030     weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2031     heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
2032 
2033     {
2034       TRACE_GC(heap()->tracer(),
2035                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2036 
2037       if (FLAG_parallel_marking) {
2038         heap_->concurrent_marking()->RescheduleJobIfNeeded(
2039             TaskPriority::kUserBlocking);
2040       }
2041 
2042       another_ephemeron_iteration_main_thread = ProcessEphemerons();
2043       FinishConcurrentMarking();
2044     }
2045 
2046     CHECK(
2047         local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2048     CHECK(local_weak_objects()
2049               ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2050 
2051     ++iterations;
2052   } while (another_ephemeron_iteration_main_thread ||
2053            heap()->concurrent_marking()->another_ephemeron_iteration() ||
2054            !local_marking_worklists()->IsEmpty() ||
2055            !local_marking_worklists()->IsWrapperEmpty() ||
2056            !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
2057 
2058   CHECK(local_marking_worklists()->IsEmpty());
2059   CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2060   CHECK(local_weak_objects()
2061             ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2062   return true;
2063 }
2064 
2065 bool MarkCompactCollector::ProcessEphemerons() {
2066   Ephemeron ephemeron;
2067   bool another_ephemeron_iteration = false;
2068 
2069   // Drain current_ephemerons and push ephemerons where key and value are still
2070   // unreachable into next_ephemerons.
2071   while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2072     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
2073       another_ephemeron_iteration = true;
2074     }
2075   }
2076 
2077   // Drain marking worklist and push discovered ephemerons into
2078   // discovered_ephemerons.
2079   size_t objects_processed;
2080   std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
2081 
2082   // As soon as a single object was processed and potentially marked another
2083   // object, we need another iteration. Otherwise we might fail to apply
2084   // ephemeron semantics to it.
2085   if (objects_processed > 0) another_ephemeron_iteration = true;
2086 
2087   // Drain discovered_ephemerons (filled while draining the marking worklist
2088   // above) and push ephemerons whose key and value are still unreachable into
2089   // next_ephemerons.
2090   while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
2091     if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
2092       another_ephemeron_iteration = true;
2093     }
2094   }
2095 
2096   // Flush local ephemerons for main task to global pool.
2097   local_weak_objects()->ephemeron_hash_tables_local.Publish();
2098   local_weak_objects()->next_ephemerons_local.Publish();
2099 
2100   return another_ephemeron_iteration;
2101 }
2102 
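// Fallback with guaranteed termination: ephemerons whose value is still
// unmarked are collected into a key -> values multimap. Whenever marking
// discovers a new object, the multimap is consulted and the values of matching
// keys are marked directly. If the newly_discovered buffer overflows, all
// ephemerons in next_ephemerons are re-scanned instead.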
2103 void MarkCompactCollector::ProcessEphemeronsLinear() {
2104   TRACE_GC(heap()->tracer(),
2105            GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
2106   CHECK(heap()->concurrent_marking()->IsStopped());
2107   std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
2108   Ephemeron ephemeron;
2109 
2110   DCHECK(
2111       local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2112   weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2113   while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2114     ProcessEphemeron(ephemeron.key, ephemeron.value);
2115 
2116     if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
2117       key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
2118     }
2119   }
2120 
2121   ephemeron_marking_.newly_discovered_limit = key_to_values.size();
2122   bool work_to_do = true;
2123 
2124   while (work_to_do) {
2125     PerformWrapperTracing();
2126 
2127     ResetNewlyDiscovered();
2128     ephemeron_marking_.newly_discovered_limit = key_to_values.size();
2129 
2130     {
2131       TRACE_GC(heap()->tracer(),
2132                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2133       // Drain marking worklist and push all discovered objects into
2134       // newly_discovered.
2135       ProcessMarkingWorklist<
2136           MarkCompactCollector::MarkingWorklistProcessingMode::
2137               kTrackNewlyDiscoveredObjects>(0);
2138     }
2139 
2140     while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
2141       ProcessEphemeron(ephemeron.key, ephemeron.value);
2142 
2143       if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
2144         key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
2145       }
2146     }
2147 
2148     if (ephemeron_marking_.newly_discovered_overflowed) {
2149       // If newly_discovered was overflowed just visit all ephemerons in
2150       // next_ephemerons.
2151       local_weak_objects()->next_ephemerons_local.Publish();
2152       weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
2153         if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
2154             non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
2155           local_marking_worklists()->Push(ephemeron.value);
2156         }
2157       });
2158 
2159     } else {
2160       // This is the good case: newly_discovered stores all discovered
2161       // objects. Now use key_to_values to see if discovered objects keep more
2162       // objects alive due to ephemeron semantics.
2163       for (HeapObject object : ephemeron_marking_.newly_discovered) {
2164         auto range = key_to_values.equal_range(object);
2165         for (auto it = range.first; it != range.second; ++it) {
2166           HeapObject value = it->second;
2167           MarkObject(object, value);
2168         }
2169       }
2170     }
2171 
2172     // Do NOT drain marking worklist here, otherwise the current checks
2173     // for work_to_do are not sufficient for determining if another iteration
2174     // is necessary.
2175 
2176     work_to_do = !local_marking_worklists()->IsEmpty() ||
2177                  !local_marking_worklists()->IsWrapperEmpty() ||
2178                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
2179     CHECK(local_weak_objects()
2180               ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2181   }
2182 
2183   ResetNewlyDiscovered();
2184   ephemeron_marking_.newly_discovered.shrink_to_fit();
2185 
2186   CHECK(local_marking_worklists()->IsEmpty());
2187   CHECK(weak_objects_.current_ephemerons.IsEmpty());
2188   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
2189 
2190   // Flush local ephemerons for main task to global pool.
2191   local_weak_objects()->ephemeron_hash_tables_local.Publish();
2192   local_weak_objects()->next_ephemerons_local.Publish();
2193 }
2194 
2195 void MarkCompactCollector::PerformWrapperTracing() {
2196   if (heap_->local_embedder_heap_tracer()->InUse()) {
2197     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
2198     if (local_marking_worklists()->PublishWrapper()) {
2199       DCHECK(local_marking_worklists()->IsWrapperEmpty());
2200     } else {
2201       // Cannot directly publish wrapper objects.
2202       LocalEmbedderHeapTracer::ProcessingScope scope(
2203           heap_->local_embedder_heap_tracer());
2204       HeapObject object;
2205       while (local_marking_worklists()->PopWrapper(&object)) {
2206         scope.TracePossibleWrapper(JSObject::cast(object));
2207       }
2208     }
2209     heap_->local_embedder_heap_tracer()->Trace(
2210         std::numeric_limits<double>::infinity());
2211   }
2212 }
2213 
2214 void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
2215 
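// Pops objects from the marking worklists and visits them with the marking
// visitor until the worklists are empty or |bytes_to_process| visited bytes
// have been reached. Returns the number of bytes and objects processed; in
// per-context mode the visited size is also attributed to the inferred native
// context.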
2216 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
2217 std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
2218     size_t bytes_to_process) {
2219   HeapObject object;
2220   size_t bytes_processed = 0;
2221   size_t objects_processed = 0;
2222   bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
2223   Isolate* isolate = heap()->isolate();
2224   PtrComprCageBase cage_base(isolate);
2225   while (local_marking_worklists()->Pop(&object) ||
2226          local_marking_worklists()->PopOnHold(&object)) {
2227     // Left trimming may result in grey or black filler objects on the marking
2228     // worklist. Ignore these objects.
2229     if (object.IsFreeSpaceOrFiller(cage_base)) {
2230       // Due to copying mark bits and the fact that grey and black have their
2231       // first bit set, one word fillers are always black.
2232       DCHECK_IMPLIES(object.map(cage_base) ==
2233                          ReadOnlyRoots(isolate).one_pointer_filler_map(),
2234                      marking_state()->IsBlack(object));
2235       // Other fillers may be black or grey depending on the color of the object
2236       // that was trimmed.
2237       DCHECK_IMPLIES(object.map(cage_base) !=
2238                          ReadOnlyRoots(isolate).one_pointer_filler_map(),
2239                      marking_state()->IsBlackOrGrey(object));
2240       continue;
2241     }
2242     DCHECK(object.IsHeapObject());
2243     DCHECK(heap()->Contains(object));
2244     DCHECK(!(marking_state()->IsWhite(object)));
2245     if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
2246                     kTrackNewlyDiscoveredObjects) {
2247       AddNewlyDiscovered(object);
2248     }
2249     Map map = object.map(cage_base);
2250     if (is_per_context_mode) {
2251       Address context;
2252       if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
2253         local_marking_worklists()->SwitchToContext(context);
2254       }
2255     }
2256     size_t visited_size = marking_visitor_->Visit(map, object);
2257     if (is_per_context_mode) {
2258       native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
2259                                           map, object, visited_size);
2260     }
2261     bytes_processed += visited_size;
2262     objects_processed++;
2263     if (bytes_to_process && bytes_processed >= bytes_to_process) {
2264       break;
2265     }
2266   }
2267   return std::make_pair(bytes_processed, objects_processed);
2268 }
2269 
2270 // Generate definitions for use in other files.
2271 template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
2272     MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
2273     size_t bytes_to_process);
2274 template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
2275     MarkCompactCollector::MarkingWorklistProcessingMode::
2276         kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
2277 
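// Applies ephemeron semantics to a single key/value pair:
//   key marked, value unmarked   -> mark the value and return true (progress),
//   key unmarked, value unmarked -> defer the pair to next_ephemerons,
//   value already marked         -> nothing to do, return false.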
2278 bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
2279   if (marking_state()->IsBlackOrGrey(key)) {
2280     if (marking_state()->WhiteToGrey(value)) {
2281       local_marking_worklists()->Push(value);
2282       return true;
2283     }
2284 
2285   } else if (marking_state()->IsWhite(value)) {
2286     local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
2287   }
2288   return false;
2289 }
2290 
2291 void MarkCompactCollector::ProcessEphemeronMarking() {
2292   DCHECK(local_marking_worklists()->IsEmpty());
2293 
2294   // Incremental marking might leave ephemerons in main task's local
2295   // buffer, flush it into global pool.
2296   local_weak_objects()->next_ephemerons_local.Publish();
2297 
2298   if (!ProcessEphemeronsUntilFixpoint()) {
2299     // Fixpoint iteration needed too many iterations and was cancelled. Use the
2300     // guaranteed linear algorithm.
2301     ProcessEphemeronsLinear();
2302   }
2303 
2304 #ifdef VERIFY_HEAP
2305   if (FLAG_verify_heap) {
2306     Ephemeron ephemeron;
2307 
2308     DCHECK(
2309         local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2310     weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2311     while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2312       CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
2313     }
2314   }
2315 #endif
2316 
2317   CHECK(local_marking_worklists()->IsEmpty());
2318   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
2319 }
2320 
2321 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
2322                                                     Isolate* isolate) {
2323   for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
2324        it.Advance()) {
2325     if (it.frame()->is_unoptimized()) return;
2326     if (it.frame()->type() == StackFrame::OPTIMIZED) {
2327       Code code = it.frame()->LookupCode();
2328       if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
2329         PtrComprCageBase cage_base(isolate);
2330         Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
2331       }
2332       return;
2333     }
2334   }
2335 }
2336 
2337 void MarkCompactCollector::RecordObjectStats() {
2338   if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
2339   // Cannot run during bootstrapping due to incomplete objects.
2340   if (isolate()->bootstrapper()->IsActive()) return;
2341   heap()->CreateObjectStats();
2342   ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
2343                                  heap()->dead_object_stats_.get());
2344   collector.Collect();
2345   if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
2346                   v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
2347     std::stringstream live, dead;
2348     heap()->live_object_stats_->Dump(live);
2349     heap()->dead_object_stats_->Dump(dead);
2350     TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
2351                          "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
2352                          "live", TRACE_STR_COPY(live.str().c_str()), "dead",
2353                          TRACE_STR_COPY(dead.str().c_str()));
2354   }
2355   if (FLAG_trace_gc_object_stats) {
2356     heap()->live_object_stats_->PrintJSON("live");
2357     heap()->dead_object_stats_->PrintJSON("dead");
2358   }
2359   heap()->live_object_stats_->CheckpointObjectStats();
2360   heap()->dead_object_stats_->ClearObjectStats();
2361 }
2362 
2363 void MarkCompactCollector::MarkLiveObjects() {
2364   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2365   // The recursive GC marker detects when it is nearing stack overflow,
2366   // and switches to a different marking system.  JS interrupts interfere
2367   // with the C stack limit check.
2368   PostponeInterruptsScope postpone(isolate());
2369 
2370   bool was_marked_incrementally = false;
2371   {
2372     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2373     if (heap_->incremental_marking()->Stop()) {
2374       MarkingBarrier::PublishAll(heap());
2375       was_marked_incrementally = true;
2376     }
2377   }
2378 
2379 #ifdef DEBUG
2380   DCHECK(state_ == PREPARE_GC);
2381   state_ = MARK_LIVE_OBJECTS;
2382 #endif
2383 
2384   heap_->local_embedder_heap_tracer()->EnterFinalPause();
2385 
2386   RootMarkingVisitor root_visitor(this);
2387 
2388   {
2389     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2390     CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
2391     MarkRoots(&root_visitor, &custom_root_body_visitor);
2392   }
2393 
2394   {
2395     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
2396     MarkObjectsFromClientHeaps();
2397   }
2398 
2399   {
2400     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
2401     if (FLAG_parallel_marking) {
2402       heap_->concurrent_marking()->RescheduleJobIfNeeded(
2403           TaskPriority::kUserBlocking);
2404     }
2405     DrainMarkingWorklist();
2406 
2407     FinishConcurrentMarking();
2408     DrainMarkingWorklist();
2409   }
2410 
2411   {
2412     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2413 
2414     DCHECK(local_marking_worklists()->IsEmpty());
2415 
2416     // Mark objects reachable through the embedder heap. This phase is
2417     // opportunistic as it may not discover graphs that are only reachable
2418     // through ephemerons.
2419     {
2420       TRACE_GC(heap()->tracer(),
2421                GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
2422       do {
2423         // PerformWrapperTracing() also empties the work items collected by
2424         // concurrent markers. As a result this call needs to happen at least
2425         // once.
2426         PerformWrapperTracing();
2427         DrainMarkingWorklist();
2428       } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
2429                !local_marking_worklists()->IsWrapperEmpty());
2430       DCHECK(local_marking_worklists()->IsWrapperEmpty());
2431       DCHECK(local_marking_worklists()->IsEmpty());
2432     }
2433 
2434     // The objects reachable from the roots are marked, yet unreachable objects
2435     // are unmarked. Mark objects reachable due to embedder heap tracing or
2436     // harmony weak maps.
2437     {
2438       TRACE_GC(heap()->tracer(),
2439                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
2440       ProcessEphemeronMarking();
2441       DCHECK(local_marking_worklists()->IsEmpty());
2442     }
2443 
2444     // The objects reachable from the roots, weak maps, and embedder heap
2445     // tracing are marked. Objects pointed to only by weak global handles cannot
2446     // be immediately reclaimed. Instead, we have to mark them as pending and
2447     // mark objects reachable from them.
2448     //
2449     // First we identify nonlive weak handles and mark them as pending
2450     // destruction.
2451     {
2452       TRACE_GC(heap()->tracer(),
2453                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2454       heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
2455           &IsUnmarkedHeapObject);
2456       DrainMarkingWorklist();
2457     }
2458 
2459     // Process finalizers, effectively keeping them alive until the next
2460     // garbage collection.
2461     {
2462       TRACE_GC(heap()->tracer(),
2463                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2464       heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
2465           &root_visitor);
2466       DrainMarkingWorklist();
2467     }
2468 
2469     // Repeat ephemeron processing from the newly marked objects.
2470     {
2471       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2472       ProcessEphemeronMarking();
2473       DCHECK(local_marking_worklists()->IsWrapperEmpty());
2474       DCHECK(local_marking_worklists()->IsEmpty());
2475     }
2476 
2477     // We depend on IterateWeakRootsForPhantomHandles being called before
2478     // ProcessOldCodeCandidates in order to identify flushed bytecode in the
2479     // CPU profiler.
2480     {
2481       heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
2482           &IsUnmarkedHeapObject);
2483     }
2484   }
2485 
2486   if (was_marked_incrementally) {
2487     // Disable the marking barrier after concurrent/parallel marking has
2488     // finished as it will reset page flags that share the same bitmap as
2489     // the evacuation candidate bit.
2490     MarkingBarrier::DeactivateAll(heap());
2491     GlobalHandles::DisableMarkingBarrier(heap()->isolate());
2492   }
2493 
2494   epoch_++;
2495 }
2496 
2497 void MarkCompactCollector::ClearNonLiveReferences() {
2498   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2499 
2500   if (isolate()->OwnsStringTable()) {
2501     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2502 
2503     // Prune the string table removing all strings only pointed to by the
2504     // string table.  Cannot use string_table() here because the string
2505     // table is marked.
2506     StringTable* string_table = isolate()->string_table();
2507     InternalizedStringTableCleaner internalized_visitor(heap());
2508     string_table->DropOldData();
2509     string_table->IterateElements(&internalized_visitor);
2510     string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
2511   }
2512 
2513   ExternalStringTableCleaner external_visitor(heap());
2514   heap()->external_string_table_.IterateAll(&external_visitor);
2515   heap()->external_string_table_.CleanUpAll();
2516 
2517   {
2518     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
2519     // ProcessFlushedBaselineCandidates should be called after clearing bytecode
2520     // so that we flush any bytecode if needed and can correctly set the code
2521     // object on the JSFunction.
2522     ProcessOldCodeCandidates();
2523     ProcessFlushedBaselineCandidates();
2524   }
2525 
2526   {
2527     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
2528     ClearFlushedJsFunctions();
2529   }
2530 
2531   {
2532     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2533     // Process the weak references.
2534     MarkCompactWeakObjectRetainer mark_compact_object_retainer(
2535         non_atomic_marking_state());
2536     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2537   }
2538 
2539   {
2540     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2541     // ClearFullMapTransitions must be called before weak references are
2542     // cleared.
2543     ClearFullMapTransitions();
2544   }
2545   {
2546     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2547     ClearWeakReferences();
2548     ClearWeakCollections();
2549     ClearJSWeakRefs();
2550   }
2551 
2552   PROFILE(heap()->isolate(), WeakCodeClearEvent());
2553 
2554   MarkDependentCodeForDeoptimization();
2555 
2556 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
2557   {
2558     TRACE_GC(heap()->tracer(),
2559              GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
2560     isolate()->external_pointer_table().Sweep(isolate());
2561   }
2562 #endif  // V8_SANDBOXED_EXTERNAL_POINTERS
2563 
2564   DCHECK(weak_objects_.transition_arrays.IsEmpty());
2565   DCHECK(weak_objects_.weak_references.IsEmpty());
2566   DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
2567   DCHECK(weak_objects_.js_weak_refs.IsEmpty());
2568   DCHECK(weak_objects_.weak_cells.IsEmpty());
2569   DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
2570   DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
2571   DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
2572 }
2573 
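// Code objects that embed references to objects that died in this cycle are
// marked for deoptimization and their embedded object slots are cleared so
// that the dead objects can be reclaimed.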
2574 void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
2575   std::pair<HeapObject, Code> weak_object_in_code;
2576   while (local_weak_objects()->weak_objects_in_code_local.Pop(
2577       &weak_object_in_code)) {
2578     HeapObject object = weak_object_in_code.first;
2579     Code code = weak_object_in_code.second;
2580     if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
2581         !code.embedded_objects_cleared()) {
2582       if (!code.marked_for_deoptimization()) {
2583         code.SetMarkedForDeoptimization("weak objects");
2584         have_code_to_deoptimize_ = true;
2585       }
2586       code.ClearEmbeddedObjects(heap_);
2587       DCHECK(code.embedded_objects_cleared());
2588     }
2589   }
2590 }
2591 
2592 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
2593   DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
2594   Object potential_parent = dead_target.constructor_or_back_pointer();
2595   if (potential_parent.IsMap()) {
2596     Map parent = Map::cast(potential_parent);
2597     DisallowGarbageCollection no_gc_obviously;
2598     if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
2599         TransitionsAccessor(isolate(), parent)
2600             .HasSimpleTransitionTo(dead_target)) {
2601       ClearPotentialSimpleMapTransition(parent, dead_target);
2602     }
2603   }
2604 }
2605 
2606 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
2607                                                              Map dead_target) {
2608   DCHECK(!map.is_prototype_map());
2609   DCHECK(!dead_target.is_prototype_map());
2610   DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
2611   // Take ownership of the descriptor array.
2612   int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2613   DescriptorArray descriptors = map.instance_descriptors(isolate());
2614   if (descriptors == dead_target.instance_descriptors(isolate()) &&
2615       number_of_own_descriptors > 0) {
2616     TrimDescriptorArray(map, descriptors);
2617     DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2618   }
2619 }
2620 
2621 void MarkCompactCollector::FlushBytecodeFromSFI(
2622     SharedFunctionInfo shared_info) {
2623   DCHECK(shared_info.HasBytecodeArray());
2624 
2625   // Retain objects required for uncompiled data.
2626   String inferred_name = shared_info.inferred_name();
2627   int start_position = shared_info.StartPosition();
2628   int end_position = shared_info.EndPosition();
2629 
2630   shared_info.DiscardCompiledMetadata(
2631       isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
2632         RecordSlot(object, slot, target);
2633       });
2634 
2635   // The size of the bytecode array should always be at least as large as an
2636   // UncompiledData object.
2637   STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
2638                 UncompiledDataWithoutPreparseData::kSize);
2639 
2640   // Replace bytecode array with an uncompiled data array.
2641   HeapObject compiled_data = shared_info.GetBytecodeArray(isolate());
2642   Address compiled_data_start = compiled_data.address();
2643   int compiled_data_size = compiled_data.Size();
2644   MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
2645 
2646   // Clear any recorded slots for the compiled data, since they are now invalid.
2647   RememberedSet<OLD_TO_NEW>::RemoveRange(
2648       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2649       SlotSet::FREE_EMPTY_BUCKETS);
2650   RememberedSet<OLD_TO_OLD>::RemoveRange(
2651       chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2652       SlotSet::FREE_EMPTY_BUCKETS);
2653 
2654   // Swap the map, using set_map_after_allocation to avoid verify heap checks
2655   // which are not necessary since we are doing this during the GC atomic pause.
2656   compiled_data.set_map_after_allocation(
2657       ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
2658       SKIP_WRITE_BARRIER);
2659 
2660   // Create a filler object for any leftover space in the bytecode array.
2661   if (!heap()->IsLargeObject(compiled_data)) {
2662     heap()->CreateFillerObjectAt(
2663         compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
2664         compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
2665         ClearRecordedSlots::kNo);
2666   }
2667 
2668   // Initialize the uncompiled data.
2669   UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
2670   uncompiled_data.InitAfterBytecodeFlush(
2671       inferred_name, start_position, end_position,
2672       [](HeapObject object, ObjectSlot slot, HeapObject target) {
2673         RecordSlot(object, slot, target);
2674       });
2675 
2676   // Mark the uncompiled data as black, and ensure all fields have already been
2677   // marked.
2678   DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
2679   non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
2680 
2681   // Use the raw function data setter to avoid validity checks, since we're
2682   // performing the unusual task of decompiling.
2683   shared_info.set_function_data(uncompiled_data, kReleaseStore);
2684   DCHECK(!shared_info.is_compiled());
2685 }
2686 
2687 void MarkCompactCollector::ProcessOldCodeCandidates() {
2688   DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
2689          weak_objects_.code_flushing_candidates.IsEmpty());
2690   SharedFunctionInfo flushing_candidate;
2691   while (local_weak_objects()->code_flushing_candidates_local.Pop(
2692       &flushing_candidate)) {
2693     bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
2694         flushing_candidate.GetBytecodeArray(isolate()));
2695     if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
2696       CodeT baseline_codet =
2697           CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
2698       // Safe to do a relaxed load here since the CodeT was acquire-loaded.
2699       Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
2700       if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
2701         // Currently baseline code holds the bytecode array strongly, and it
2702         // is always ensured that the bytecode is live if the baseline code is
2703         // live. Hence baseline code can safely load the bytecode array without
2704         // additional checks. If this ever changes, we need to update these
2705         // checks to flush code if the bytecode is not live, and also update
2706         // baseline code to bail out if there is no bytecode.
2707         DCHECK(is_bytecode_live);
2708 
2709         // Regardless of whether the CodeT is a CodeDataContainer or the Code
2710         // itself, if the Code is live then the CodeT has to be live and will
2711         // have been marked via the owning JSFunction.
2712         DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
2713       } else if (is_bytecode_live) {
2714         // If baseline code is flushed but we have a valid bytecode array reset
2715         // the function_data field to the BytecodeArray/InterpreterData.
2716         flushing_candidate.set_function_data(
2717             baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
2718       }
2719     }
2720 
2721     if (!is_bytecode_live) {
2722       // If baseline code flushing is disabled we should only flush bytecode
2723       // from functions that don't have baseline data.
2724       DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
2725 
2726       // If the BytecodeArray is dead, flush it, which will replace the field
2727       // with an uncompiled data object.
2728       FlushBytecodeFromSFI(flushing_candidate);
2729     }
2730 
2731     // Now record the slot, which has been updated either to uncompiled data,
2732     // baseline code, or a BytecodeArray that is still alive.
2733     ObjectSlot slot =
2734         flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
2735     RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
2736   }
2737 }
2738 
2739 void MarkCompactCollector::ClearFlushedJsFunctions() {
2740   DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
2741   JSFunction flushed_js_function;
2742   while (local_weak_objects()->flushed_js_functions_local.Pop(
2743       &flushed_js_function)) {
2744     auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2745                                      Object target) {
2746       RecordSlot(object, slot, HeapObject::cast(target));
2747     };
2748     flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
2749   }
2750 }
2751 
2752 void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
2753   DCHECK(FLAG_flush_baseline_code ||
2754          weak_objects_.baseline_flushing_candidates.IsEmpty());
2755   JSFunction flushed_js_function;
2756   while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
2757       &flushed_js_function)) {
2758     auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2759                                      Object target) {
2760       RecordSlot(object, slot, HeapObject::cast(target));
2761     };
2762     flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
2763 
2764     // Record the code slot that has been updated either to CompileLazy,
2765     // InterpreterEntryTrampoline or baseline code.
2766     ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
2767     RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
2768   }
2769 }
2770 
2771 void MarkCompactCollector::ClearFullMapTransitions() {
2772   TransitionArray array;
2773   while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
2774     int num_transitions = array.number_of_entries();
2775     if (num_transitions > 0) {
2776       Map map;
2777       // The array might contain "undefined" elements because it's not yet
2778       // filled. Allow it.
2779       if (array.GetTargetIfExists(0, isolate(), &map)) {
2780         DCHECK(!map.is_null());  // Weak pointers aren't cleared yet.
2781         Object constructor_or_back_pointer = map.constructor_or_back_pointer();
2782         if (constructor_or_back_pointer.IsSmi()) {
2783           DCHECK(isolate()->has_active_deserializer());
2784           DCHECK_EQ(constructor_or_back_pointer,
2785                     Smi::uninitialized_deserialization_value());
2786           continue;
2787         }
2788         Map parent = Map::cast(map.constructor_or_back_pointer());
2789         bool parent_is_alive =
2790             non_atomic_marking_state()->IsBlackOrGrey(parent);
2791         DescriptorArray descriptors =
2792             parent_is_alive ? parent.instance_descriptors(isolate())
2793                             : DescriptorArray();
2794         bool descriptors_owner_died =
2795             CompactTransitionArray(parent, array, descriptors);
2796         if (descriptors_owner_died) {
2797           TrimDescriptorArray(parent, descriptors);
2798         }
2799       }
2800     }
2801   }
2802 }
2803 
2804 // Returns false if no maps have died, or if the transition array is
2805 // still being deserialized.
2806 bool MarkCompactCollector::TransitionArrayNeedsCompaction(
2807     TransitionArray transitions, int num_transitions) {
2808   for (int i = 0; i < num_transitions; ++i) {
2809     MaybeObject raw_target = transitions.GetRawTarget(i);
2810     if (raw_target.IsSmi()) {
2811       // This target is still being deserialized.
2812       DCHECK(isolate()->has_active_deserializer());
2813       DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
2814 #ifdef DEBUG
2815       // Targets can only be dead if this array is fully deserialized.
2816       for (int j = 0; j < num_transitions; ++j) {
2817         DCHECK_IMPLIES(
2818             !transitions.GetRawTarget(j).IsSmi(),
2819             !non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
2820       }
2821 #endif
2822       return false;
2823     } else if (non_atomic_marking_state()->IsWhite(
2824                    TransitionsAccessor::GetTargetFromRaw(raw_target))) {
2825 #ifdef DEBUG
2826       // Targets can only be dead if this array is fully deserialized.
2827       for (int j = 0; j < num_transitions; ++j) {
2828         DCHECK(!transitions.GetRawTarget(j).IsSmi());
2829       }
2830 #endif
2831       return true;
2832     }
2833   }
2834   return false;
2835 }
2836 
2837 bool MarkCompactCollector::CompactTransitionArray(Map map,
2838                                                   TransitionArray transitions,
2839                                                   DescriptorArray descriptors) {
2840   DCHECK(!map.is_prototype_map());
2841   int num_transitions = transitions.number_of_entries();
2842   if (!TransitionArrayNeedsCompaction(transitions, num_transitions)) {
2843     return false;
2844   }
2845   bool descriptors_owner_died = false;
2846   int transition_index = 0;
2847   // Compact all live transitions to the left.
2848   for (int i = 0; i < num_transitions; ++i) {
2849     Map target = transitions.GetTarget(i);
2850     DCHECK_EQ(target.constructor_or_back_pointer(), map);
2851     if (non_atomic_marking_state()->IsWhite(target)) {
2852       if (!descriptors.is_null() &&
2853           target.instance_descriptors(isolate()) == descriptors) {
2854         DCHECK(!target.is_prototype_map());
2855         descriptors_owner_died = true;
2856       }
2857     } else {
2858       if (i != transition_index) {
2859         Name key = transitions.GetKey(i);
2860         transitions.SetKey(transition_index, key);
2861         HeapObjectSlot key_slot = transitions.GetKeySlot(transition_index);
2862         RecordSlot(transitions, key_slot, key);
2863         MaybeObject raw_target = transitions.GetRawTarget(i);
2864         transitions.SetRawTarget(transition_index, raw_target);
2865         HeapObjectSlot target_slot =
2866             transitions.GetTargetSlot(transition_index);
2867         RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
2868       }
2869       transition_index++;
2870     }
2871   }
2872   // If there are no transitions to be cleared, return.
2873   if (transition_index == num_transitions) {
2874     DCHECK(!descriptors_owner_died);
2875     return false;
2876   }
2877   // Note that we never eliminate a transition array, though we might right-trim
2878   // such that number_of_transitions() == 0. If this assumption changes,
2879   // TransitionArray::Insert() will need to deal with the case that a transition
2880   // array disappeared during GC.
2881   int trim = transitions.Capacity() - transition_index;
2882   if (trim > 0) {
2883     heap_->RightTrimWeakFixedArray(transitions,
2884                                    trim * TransitionArray::kEntrySize);
2885     transitions.SetNumberOfTransitions(transition_index);
2886   }
2887   return descriptors_owner_died;
2888 }
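// [Editor's note] The sketch below is not part of the V8 sources. It restates
// the "compact live entries to the left, then right-trim the dead tail"
// pattern of CompactTransitionArray() on a plain std::vector, with a
// hypothetical is_live predicate standing in for the marking state.
static size_t CompactLeftSketch(std::vector<int>* entries,
                                bool (*is_live)(int)) {
  size_t write = 0;
  for (size_t read = 0; read < entries->size(); ++read) {
    if (!is_live((*entries)[read])) continue;  // skip dead entries
    if (read != write) (*entries)[write] = (*entries)[read];
    ++write;
  }
  entries->resize(write);  // analogous to the right-trim above
  return write;            // analogous to SetNumberOfTransitions()
}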
2889 
2890 void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
2891                                                     int descriptors_to_trim) {
2892   int old_nof_all_descriptors = array.number_of_all_descriptors();
2893   int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2894   DCHECK_LT(0, descriptors_to_trim);
2895   DCHECK_LE(0, new_nof_all_descriptors);
2896   Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
2897   Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
2898   MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
2899   RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
2900                                          SlotSet::FREE_EMPTY_BUCKETS);
2901   RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
2902                                          SlotSet::FREE_EMPTY_BUCKETS);
2903   heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2904                                ClearRecordedSlots::kNo);
2905   array.set_number_of_all_descriptors(new_nof_all_descriptors);
2906 }
2907 
2908 void MarkCompactCollector::TrimDescriptorArray(Map map,
2909                                                DescriptorArray descriptors) {
2910   int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2911   if (number_of_own_descriptors == 0) {
2912     DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2913     return;
2914   }
2915   int to_trim =
2916       descriptors.number_of_all_descriptors() - number_of_own_descriptors;
2917   if (to_trim > 0) {
2918     descriptors.set_number_of_descriptors(number_of_own_descriptors);
2919     RightTrimDescriptorArray(descriptors, to_trim);
2920 
2921     TrimEnumCache(map, descriptors);
2922     descriptors.Sort();
2923   }
2924   DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2925   map.set_owns_descriptors(true);
2926 }
2927 
2928 void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
2929   int live_enum = map.EnumLength();
2930   if (live_enum == kInvalidEnumCacheSentinel) {
2931     live_enum = map.NumberOfEnumerableProperties();
2932   }
2933   if (live_enum == 0) return descriptors.ClearEnumCache();
2934   EnumCache enum_cache = descriptors.enum_cache();
2935 
2936   FixedArray keys = enum_cache.keys();
2937   int to_trim = keys.length() - live_enum;
2938   if (to_trim <= 0) return;
2939   heap_->RightTrimFixedArray(keys, to_trim);
2940 
2941   FixedArray indices = enum_cache.indices();
2942   to_trim = indices.length() - live_enum;
2943   if (to_trim <= 0) return;
2944   heap_->RightTrimFixedArray(indices, to_trim);
2945 }
2946 
2947 void MarkCompactCollector::ClearWeakCollections() {
2948   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2949   EphemeronHashTable table;
2950   while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
2951     for (InternalIndex i : table.IterateEntries()) {
2952       HeapObject key = HeapObject::cast(table.KeyAt(i));
2953 #ifdef VERIFY_HEAP
2954       if (FLAG_verify_heap) {
2955         Object value = table.ValueAt(i);
2956         if (value.IsHeapObject()) {
2957           HeapObject heap_object = HeapObject::cast(value);
2958           CHECK_IMPLIES(
2959               (!is_shared_heap_ && key.InSharedHeap()) ||
2960                   non_atomic_marking_state()->IsBlackOrGrey(key),
2961               (!is_shared_heap_ && heap_object.InSharedHeap()) ||
2962                   non_atomic_marking_state()->IsBlackOrGrey(heap_object));
2963         }
2964       }
2965 #endif
2966       if (!is_shared_heap_ && key.InSharedHeap()) continue;
2967       if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2968         table.RemoveEntry(i);
2969       }
2970     }
2971   }
2972   for (auto it = heap_->ephemeron_remembered_set_.begin();
2973        it != heap_->ephemeron_remembered_set_.end();) {
2974     if (!non_atomic_marking_state()->IsBlackOrGrey(it->first)) {
2975       it = heap_->ephemeron_remembered_set_.erase(it);
2976     } else {
2977       ++it;
2978     }
2979   }
2980 }
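// [Editor's note] Not part of the V8 sources: a minimal sketch of the
// erase-while-iterating idiom used on heap_->ephemeron_remembered_set_ above.
// std::unordered_map::erase(it) returns the iterator following the erased
// element, so the loop advances either through erase() or through ++it, never
// both. All names here are hypothetical.
static void PruneDeadKeysSketch(std::unordered_map<int, int>* table,
                                bool (*is_live)(int key)) {
  for (auto it = table->begin(); it != table->end();) {
    if (!is_live(it->first)) {
      it = table->erase(it);  // advances past the erased entry
    } else {
      ++it;
    }
  }
}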
2981 
2982 void MarkCompactCollector::ClearWeakReferences() {
2983   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2984   std::pair<HeapObject, HeapObjectSlot> slot;
2985   HeapObjectReference cleared_weak_ref =
2986       HeapObjectReference::ClearedValue(isolate());
2987   while (local_weak_objects()->weak_references_local.Pop(&slot)) {
2988     HeapObject value;
2989     // The slot could have been overwritten, so we have to treat it
2990     // as MaybeObjectSlot.
2991     MaybeObjectSlot location(slot.second);
2992     if ((*location)->GetHeapObjectIfWeak(&value)) {
2993       DCHECK(!value.IsCell());
2994       if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2995         // The value of the weak reference is alive.
2996         RecordSlot(slot.first, HeapObjectSlot(location), value);
2997       } else {
2998         if (value.IsMap()) {
2999           // The map is non-live.
3000           ClearPotentialSimpleMapTransition(Map::cast(value));
3001         }
3002         location.store(cleared_weak_ref);
3003       }
3004     }
3005   }
3006 }
3007 
3008 void MarkCompactCollector::ClearJSWeakRefs() {
3009   JSWeakRef weak_ref;
3010   while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
3011     HeapObject target = HeapObject::cast(weak_ref.target());
3012     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
3013       weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
3014     } else {
3015       // The value of the JSWeakRef is alive.
3016       ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
3017       RecordSlot(weak_ref, slot, target);
3018     }
3019   }
3020   WeakCell weak_cell;
3021   while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
3022     auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
3023                                      Object target) {
3024       if (target.IsHeapObject()) {
3025         RecordSlot(object, slot, HeapObject::cast(target));
3026       }
3027     };
3028     HeapObject target = HeapObject::cast(weak_cell.target());
3029     if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
3030       DCHECK(target.CanBeHeldWeakly());
3031       // The value of the WeakCell is dead.
3032       JSFinalizationRegistry finalization_registry =
3033           JSFinalizationRegistry::cast(weak_cell.finalization_registry());
3034       if (!finalization_registry.scheduled_for_cleanup()) {
3035         heap()->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
3036                                                    gc_notify_updated_slot);
3037       }
3038       // We're modifying the pointers in WeakCell and JSFinalizationRegistry
3039       // during GC; thus we need to record the slots that are written. The
3040       // normal write barrier is not enough, since it's disabled before GC.
3041       weak_cell.Nullify(isolate(), gc_notify_updated_slot);
3042       DCHECK(finalization_registry.NeedsCleanup());
3043       DCHECK(finalization_registry.scheduled_for_cleanup());
3044     } else {
3045       // The value of the WeakCell is alive.
3046       ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
3047       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
3048     }
3049 
3050     HeapObject unregister_token = weak_cell.unregister_token();
3051     if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
3052       DCHECK(unregister_token.CanBeHeldWeakly());
3053       // The unregister token is dead. Remove any corresponding entries in the
3054       // key map. Multiple WeakCells with the same token will have their
3055       // unregister_token fields set to undefined when the first such WeakCell
3056       // is processed. Like above, we're modifying pointers during GC, so record
3057       // the slots.
3058       JSFinalizationRegistry finalization_registry =
3059           JSFinalizationRegistry::cast(weak_cell.finalization_registry());
3060       finalization_registry.RemoveUnregisterToken(
3061           unregister_token, isolate(),
3062           JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
3063           gc_notify_updated_slot);
3064     } else {
3065       // The unregister_token is alive.
3066       ObjectSlot slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
3067       RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
3068     }
3069   }
3070   heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
3071 }
3072 
3073 bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
3074   return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
3075 }
3076 
3077 // static
3078 bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
3079                                                  HeapObject target) {
3080   MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
3081   BasicMemoryChunk* target_chunk = BasicMemoryChunk::FromHeapObject(target);
3082   return target_chunk->IsEvacuationCandidate() &&
3083          !source_chunk->ShouldSkipEvacuationSlotRecording();
3084 }
3085 
3086 // static
3087 MarkCompactCollector::RecordRelocSlotInfo
3088 MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
3089                                        HeapObject target) {
3090   DCHECK_EQ(host, rinfo->host());
3091 
3092   RecordRelocSlotInfo result;
3093   const RelocInfo::Mode rmode = rinfo->rmode();
3094   Address addr;
3095   SlotType slot_type;
3096 
3097   if (rinfo->IsInConstantPool()) {
3098     addr = rinfo->constant_pool_entry_address();
3099 
3100     if (RelocInfo::IsCodeTargetMode(rmode)) {
3101       slot_type = SlotType::kConstPoolCodeEntry;
3102     } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
3103       slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
3104     } else {
3105       DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
3106       slot_type = SlotType::kConstPoolEmbeddedObjectFull;
3107     }
3108   } else {
3109     addr = rinfo->pc();
3110 
3111     if (RelocInfo::IsCodeTargetMode(rmode)) {
3112       slot_type = SlotType::kCodeEntry;
3113     } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
3114       slot_type = SlotType::kEmbeddedObjectFull;
3115     } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
3116       slot_type = SlotType::kEmbeddedObjectCompressed;
3117     } else {
3118       DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
3119       slot_type = SlotType::kEmbeddedObjectData;
3120     }
3121   }
3122 
3123   MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
3124   const uintptr_t offset = addr - source_chunk->address();
3125   DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
3126   result.memory_chunk = source_chunk;
3127   result.slot_type = slot_type;
3128   result.offset = static_cast<uint32_t>(offset);
3129 
3130   return result;
3131 }
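// [Editor's note] Not part of the V8 sources: the inverse of the computation
// above, sketched with a hypothetical ChunkLikeSketch stand-in for
// MemoryChunk. A typed slot is recorded as (chunk, slot type, 32-bit offset);
// the absolute slot address is recovered by adding the offset to the chunk
// base address.
struct ChunkLikeSketch {
  uintptr_t base;  // corresponds to source_chunk->address()
};
static inline uintptr_t SlotAddressFromOffsetSketch(
    const ChunkLikeSketch& chunk, uint32_t offset) {
  return chunk.base + offset;  // addr == chunk->address() + offset
}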
3132 
3133 // static
3134 void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
3135                                            HeapObject target) {
3136   if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
3137   RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
3138 
3139   // Access to TypedSlots needs to be protected, since LocalHeaps might
3140   // publish code in the background thread.
3141   base::Optional<base::MutexGuard> opt_guard;
3142   if (FLAG_concurrent_sparkplug) {
3143     opt_guard.emplace(info.memory_chunk->mutex());
3144   }
3145   RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
3146                                          info.offset);
3147 }
3148 
3149 namespace {
3150 
3151 // Missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns an attempt
3152 // to store a weak reference into a strong-only slot into a compilation error.
3153 template <typename TSlot, HeapObjectReferenceType reference_type>
3154 typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
3155 
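// [Editor's note] Not part of the V8 sources: a self-contained illustration of
// the technique used by MakeSlotValue(). The primary template is only
// declared; the combinations that are meant to exist are defined as explicit
// specializations, so any other instantiation fails to build (typically at
// link time) instead of silently storing the wrong kind of reference. All
// names below are hypothetical.
template <typename Slot, bool kWeak>
int EncodeReferenceSketch(int value);  // declared, intentionally not defined

template <>
int EncodeReferenceSketch<int, false>(int value) {
  return value;  // the only supported combination in this sketch
}
// Calling EncodeReferenceSketch<int, true>(x) would fail to build, because no
// definition exists for that specialization.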
3156 template <>
3157 Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
3158     HeapObject heap_object) {
3159   return heap_object;
3160 }
3161 
3162 template <>
3163 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
3164     HeapObject heap_object) {
3165   return HeapObjectReference::Strong(heap_object);
3166 }
3167 
3168 template <>
3169 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
3170     HeapObject heap_object) {
3171   return HeapObjectReference::Weak(heap_object);
3172 }
3173 
3174 template <>
3175 Object MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::STRONG>(
3176     HeapObject heap_object) {
3177   return heap_object;
3178 }
3179 
3180 #ifdef V8_COMPRESS_POINTERS
3181 template <>
3182 Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
3183     HeapObject heap_object) {
3184   return heap_object;
3185 }
3186 
3187 template <>
3188 MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
3189     HeapObject heap_object) {
3190   return HeapObjectReference::Strong(heap_object);
3191 }
3192 
3193 // The following specialization
3194 //   MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
3195 // is not used.
3196 #endif
3197 
3198 template <AccessMode access_mode, HeapObjectReferenceType reference_type,
3199           typename TSlot>
3200 static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
3201                                             TSlot slot,
3202                                             typename TSlot::TObject old,
3203                                             HeapObject heap_obj) {
3204   static_assert(std::is_same<TSlot, FullObjectSlot>::value ||
3205                     std::is_same<TSlot, ObjectSlot>::value ||
3206                     std::is_same<TSlot, FullMaybeObjectSlot>::value ||
3207                     std::is_same<TSlot, MaybeObjectSlot>::value ||
3208                     std::is_same<TSlot, OffHeapObjectSlot>::value,
3209                 "Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
3210                 "expected here");
3211   MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
3212   if (map_word.IsForwardingAddress()) {
3213     DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
3214                    MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
3215                        Page::FromHeapObject(heap_obj)->IsFlagSet(
3216                            Page::COMPACTION_WAS_ABORTED));
3217     PtrComprCageBase host_cage_base =
3218         V8_EXTERNAL_CODE_SPACE_BOOL ? GetPtrComprCageBase(heap_obj) : cage_base;
3219     typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
3220         map_word.ToForwardingAddress(host_cage_base));
3221     if (access_mode == AccessMode::NON_ATOMIC) {
3222       // Needs to be atomic for map space compaction: This slot could be a map
3223       // word which we update while loading the map word for updating the slot
3224       // on another page.
3225       slot.Relaxed_Store(target);
3226     } else {
3227       slot.Release_CompareAndSwap(old, target);
3228     }
3229     DCHECK(!Heap::InFromPage(target));
3230     DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
3231   } else {
3232     DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
3233   }
3234   return REMOVE_SLOT;
3235 }
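// [Editor's note] Not part of the V8 sources: the core idea behind UpdateSlot()
// restated with hypothetical plain structs. Once an object has been evacuated,
// its old copy carries a forwarding pointer, and every slot that still refers
// to the old copy is rewritten to point at the new location.
struct MovedObjectSketch {
  MovedObjectSketch* forwarding = nullptr;  // non-null once the object moved
};
static inline void UpdateSlotSketch(MovedObjectSketch** slot) {
  MovedObjectSketch* object = *slot;
  if (object != nullptr && object->forwarding != nullptr) {
    *slot = object->forwarding;  // point the slot at the new location
  }
}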
3236 
3237 template <AccessMode access_mode, typename TSlot>
3238 static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
3239                                             TSlot slot) {
3240   typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
3241   HeapObject heap_obj;
3242   if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
3243     UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(cage_base, slot, obj,
3244                                                            heap_obj);
3245   } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
3246     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3247         cage_base, slot, obj, heap_obj);
3248   }
3249   return REMOVE_SLOT;
3250 }
3251 
3252 template <AccessMode access_mode, typename TSlot>
3253 static inline SlotCallbackResult UpdateStrongSlot(PtrComprCageBase cage_base,
3254                                                   TSlot slot) {
3255   typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
3256   DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
3257   HeapObject heap_obj;
3258   if (obj.GetHeapObject(&heap_obj)) {
3259     return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3260         cage_base, slot, obj, heap_obj);
3261   }
3262   return REMOVE_SLOT;
3263 }
3264 
3265 template <AccessMode access_mode>
3266 static inline SlotCallbackResult UpdateStrongCodeSlot(
3267     HeapObject host, PtrComprCageBase cage_base,
3268     PtrComprCageBase code_cage_base, CodeObjectSlot slot) {
3269   Object obj = slot.Relaxed_Load(code_cage_base);
3270   DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
3271   HeapObject heap_obj;
3272   if (obj.GetHeapObject(&heap_obj)) {
3273     SlotCallbackResult result =
3274         UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3275             cage_base, slot, obj, heap_obj);
3276 
3277     CodeDataContainer code_data_container =
3278         CodeDataContainer::cast(HeapObject::FromAddress(
3279             slot.address() - CodeDataContainer::kCodeOffset));
3280     Code code = code_data_container.code(code_cage_base);
3281     Isolate* isolate_for_sandbox = GetIsolateForSandbox(host);
3282     code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
3283     return result;
3284   }
3285   return REMOVE_SLOT;
3286 }
3287 
3288 }  // namespace
3289 
3290 // Visitor for updating root pointers and to-space pointers.
3291 // It does not expect to encounter pointers to dead objects.
3292 class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
3293                                       public RootVisitor {
3294  public:
3295   explicit PointersUpdatingVisitor(Heap* heap)
3296       : ObjectVisitorWithCageBases(heap) {}
3297 
3298   void VisitPointer(HeapObject host, ObjectSlot p) override {
3299     UpdateStrongSlotInternal(cage_base(), p);
3300   }
3301 
3302   void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
3303     UpdateSlotInternal(cage_base(), p);
3304   }
3305 
3306   void VisitPointers(HeapObject host, ObjectSlot start,
3307                      ObjectSlot end) override {
3308     for (ObjectSlot p = start; p < end; ++p) {
3309       UpdateStrongSlotInternal(cage_base(), p);
3310     }
3311   }
3312 
3313   void VisitPointers(HeapObject host, MaybeObjectSlot start,
3314                      MaybeObjectSlot end) final {
3315     for (MaybeObjectSlot p = start; p < end; ++p) {
3316       UpdateSlotInternal(cage_base(), p);
3317     }
3318   }
3319 
3320   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3321     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
3322     UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base(),
3323                                                  code_cage_base(), slot);
3324   }
3325 
3326   void VisitRootPointer(Root root, const char* description,
3327                         FullObjectSlot p) override {
3328     DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
3329     UpdateRootSlotInternal(cage_base(), p);
3330   }
3331 
3332   void VisitRootPointers(Root root, const char* description,
3333                          FullObjectSlot start, FullObjectSlot end) override {
3334     for (FullObjectSlot p = start; p < end; ++p) {
3335       UpdateRootSlotInternal(cage_base(), p);
3336     }
3337   }
3338 
3339   void VisitRootPointers(Root root, const char* description,
3340                          OffHeapObjectSlot start,
3341                          OffHeapObjectSlot end) override {
3342     for (OffHeapObjectSlot p = start; p < end; ++p) {
3343       UpdateRootSlotInternal(cage_base(), p);
3344     }
3345   }
3346 
3347   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3348     // This visitor never visits code objects.
3349     UNREACHABLE();
3350   }
3351 
3352   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3353     // This visitor never visits code objects.
3354     UNREACHABLE();
3355   }
3356 
3357  private:
3358   static inline SlotCallbackResult UpdateRootSlotInternal(
3359       PtrComprCageBase cage_base, FullObjectSlot slot) {
3360     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3361   }
3362 
3363   static inline SlotCallbackResult UpdateRootSlotInternal(
3364       PtrComprCageBase cage_base, OffHeapObjectSlot slot) {
3365     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3366   }
3367 
3368   static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
3369       PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3370     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3371   }
3372 
3373   static inline SlotCallbackResult UpdateStrongSlotInternal(
3374       PtrComprCageBase cage_base, ObjectSlot slot) {
3375     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3376   }
3377 
3378   static inline SlotCallbackResult UpdateSlotInternal(
3379       PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3380     return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3381   }
3382 };
3383 
3384 #ifdef VERIFY_HEAP
3385 // Visitor for verifying that slots in client heaps do not point at shared
3386 // heap objects whose map word is still a forwarding address.
3387 class ClientHeapVerifier final : public ObjectVisitorWithCageBases {
3388  public:
3389   explicit ClientHeapVerifier(Heap* heap) : ObjectVisitorWithCageBases(heap) {}
3390 
3391   void VisitPointer(HeapObject host, ObjectSlot p) override {
3392     VerifySlot(cage_base(), p);
3393   }
3394 
3395   void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
3396     VerifySlot(cage_base(), p);
3397   }
3398 
3399   void VisitPointers(HeapObject host, ObjectSlot start,
3400                      ObjectSlot end) override {
3401     for (ObjectSlot p = start; p < end; ++p) {
3402       VerifySlot(cage_base(), p);
3403     }
3404   }
3405 
3406   void VisitPointers(HeapObject host, MaybeObjectSlot start,
3407                      MaybeObjectSlot end) final {
3408     for (MaybeObjectSlot p = start; p < end; ++p) {
3409       VerifySlot(cage_base(), p);
3410     }
3411   }
3412 
3413   void VisitMapPointer(HeapObject host) override {
3414     VerifySlot(cage_base(), host.map_slot());
3415   }
3416 
3417   void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3418     VerifySlot(code_cage_base(), ObjectSlot(slot.address()));
3419   }
3420 
3421   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
3422 
3423   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
3424 
3425  private:
3426   void VerifySlot(PtrComprCageBase cage_base, ObjectSlot slot) {
3427     HeapObject heap_object;
3428     if (slot.load(cage_base).GetHeapObject(&heap_object)) {
3429       VerifyHeapObject(heap_object);
3430     }
3431   }
3432 
3433   void VerifySlot(PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3434     HeapObject heap_object;
3435     if (slot.load(cage_base).GetHeapObject(&heap_object)) {
3436       VerifyHeapObject(heap_object);
3437     }
3438   }
3439 
3440   void VerifyHeapObject(HeapObject heap_object) {
3441     if (BasicMemoryChunk::FromHeapObject(heap_object)->InReadOnlySpace())
3442       return;
3443     if (!heap_object.InSharedHeap()) return;
3444     CHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
3445   }
3446 };
3447 #endif  // VERIFY_HEAP
3448 
3449 static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
3450                                                         FullObjectSlot p) {
3451   HeapObject old_string = HeapObject::cast(*p);
3452   MapWord map_word = old_string.map_word(kRelaxedLoad);
3453 
3454   if (map_word.IsForwardingAddress()) {
3455     String new_string = String::cast(map_word.ToForwardingAddress());
3456 
3457     if (new_string.IsExternalString()) {
3458       MemoryChunk::MoveExternalBackingStoreBytes(
3459           ExternalBackingStoreType::kExternalString,
3460           Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
3461           ExternalString::cast(new_string).ExternalPayloadSize());
3462     }
3463     return new_string;
3464   }
3465 
3466   return String::cast(*p);
3467 }
3468 
3469 void MarkCompactCollector::EvacuatePrologue() {
3470   // New space.
3471   NewSpace* new_space = heap()->new_space();
3472 
3473   if (new_space) {
3474     // Append the list of new space pages to be processed.
3475     for (Page* p :
3476          PageRange(new_space->first_allocatable_address(), new_space->top())) {
3477       new_space_evacuation_pages_.push_back(p);
3478     }
3479     new_space->Flip();
3480     new_space->ResetLinearAllocationArea();
3481 
3482     DCHECK_EQ(new_space->Size(), 0);
3483   }
3484 
3485   if (heap()->new_lo_space()) {
3486     heap()->new_lo_space()->Flip();
3487     heap()->new_lo_space()->ResetPendingObject();
3488   }
3489 
3490   // Old space.
3491   DCHECK(old_space_evacuation_pages_.empty());
3492   old_space_evacuation_pages_ = std::move(evacuation_candidates_);
3493   evacuation_candidates_.clear();
3494   DCHECK(evacuation_candidates_.empty());
3495 }
3496 
3497 void MarkCompactCollector::EvacuateEpilogue() {
3498   aborted_evacuation_candidates_due_to_oom_.clear();
3499   aborted_evacuation_candidates_due_to_flags_.clear();
3500 
3501   // New space.
3502   if (heap()->new_space()) {
3503     heap()->new_space()->set_age_mark(heap()->new_space()->top());
3504     DCHECK_EQ(0, heap()->new_space()->Size());
3505   }
3506 
3507   // Deallocate unmarked large objects.
3508   heap()->lo_space()->FreeUnmarkedObjects();
3509   heap()->code_lo_space()->FreeUnmarkedObjects();
3510   if (heap()->new_lo_space()) {
3511     heap()->new_lo_space()->FreeUnmarkedObjects();
3512   }
3513 
3514   // Old space. Deallocate evacuated candidate pages.
3515   ReleaseEvacuationCandidates();
3516 
3517   // Give pages that are queued to be freed back to the OS.
3518   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3519 
3520 #ifdef DEBUG
3521   MemoryChunkIterator chunk_iterator(heap());
3522 
3523   while (chunk_iterator.HasNext()) {
3524     MemoryChunk* chunk = chunk_iterator.Next();
3525 
3526     // Old-to-old slot sets must be empty after evacuation.
3527     DCHECK_NULL((chunk->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3528     DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
3529     DCHECK_NULL((chunk->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3530     DCHECK_NULL(chunk->invalidated_slots<OLD_TO_OLD>());
3531     DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
3532   }
3533 #endif
3534 }
3535 
3536 namespace {
3537 ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
3538   if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
3539     return new ConcurrentAllocator(nullptr, heap->shared_old_space());
3540   }
3541 
3542   return nullptr;
3543 }
3544 }  // namespace
3545 
3546 class Evacuator : public Malloced {
3547  public:
3548   enum EvacuationMode {
3549     kObjectsNewToOld,
3550     kPageNewToOld,
3551     kObjectsOldToOld,
3552     kPageNewToNew,
3553   };
3554 
3555   static const char* EvacuationModeName(EvacuationMode mode) {
3556     switch (mode) {
3557       case kObjectsNewToOld:
3558         return "objects-new-to-old";
3559       case kPageNewToOld:
3560         return "page-new-to-old";
3561       case kObjectsOldToOld:
3562         return "objects-old-to-old";
3563       case kPageNewToNew:
3564         return "page-new-to-new";
3565     }
3566   }
3567 
3568   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3569     // Note: The order of checks is important in this function.
3570     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3571       return kPageNewToOld;
3572     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3573       return kPageNewToNew;
3574     if (chunk->InYoungGeneration()) return kObjectsNewToOld;
3575     return kObjectsOldToOld;
3576   }
3577 
3578   // NewSpacePages with more live bytes than this threshold qualify for fast
3579   // evacuation.
3580   static intptr_t NewSpacePageEvacuationThreshold() {
3581     if (FLAG_page_promotion)
3582       return FLAG_page_promotion_threshold *
3583              MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
3584     return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
3585   }
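  // [Editor's note] Worked example, not part of the V8 sources: if, say,
  // FLAG_page_promotion_threshold were 70 and AllocatableMemoryInDataPage()
  // were 256 KB, the threshold would be 70 * 256 KB / 100 ≈ 179 KB, so only
  // new-space pages that are roughly 70% live would qualify for page
  // promotion. With --no-page-promotion the threshold is the allocatable page
  // size plus kTaggedSize, which no page can reach, effectively disabling
  // fast page evacuation.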
3586 
3587   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
3588             EvacuationAllocator* local_allocator,
3589             AlwaysPromoteYoung always_promote_young)
3590       : heap_(heap),
3591         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
3592         shared_old_allocator_(CreateSharedOldAllocator(heap_)),
3593         new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
3594                            record_visitor, &local_pretenuring_feedback_,
3595                            always_promote_young),
3596         new_to_new_page_visitor_(heap_, record_visitor,
3597                                  &local_pretenuring_feedback_),
3598         new_to_old_page_visitor_(heap_, record_visitor,
3599                                  &local_pretenuring_feedback_),
3600 
3601         old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
3602                            record_visitor),
3603         local_allocator_(local_allocator),
3604         duration_(0.0),
3605         bytes_compacted_(0) {}
3606 
3607   virtual ~Evacuator() = default;
3608 
3609   void EvacuatePage(MemoryChunk* chunk);
3610 
3611   void AddObserver(MigrationObserver* observer) {
3612     new_space_visitor_.AddObserver(observer);
3613     old_space_visitor_.AddObserver(observer);
3614   }
3615 
3616   // Merge back locally cached info sequentially. Note that this method needs
3617   // to be called from the main thread.
3618   virtual void Finalize();
3619 
3620   virtual GCTracer::Scope::ScopeId GetBackgroundTracingScope() = 0;
3621   virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
3622 
3623  protected:
3624   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3625 
3626   // |saved_live_bytes| is set to the live bytes of the page that was processed.
3627   virtual void RawEvacuatePage(MemoryChunk* chunk,
3628                                intptr_t* saved_live_bytes) = 0;
3629 
3630   inline Heap* heap() { return heap_; }
3631 
3632   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3633     duration_ += duration;
3634     bytes_compacted_ += bytes_compacted;
3635   }
3636 
3637   Heap* heap_;
3638 
3639   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
3640 
3641   // Allocator for the shared heap.
3642   std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
3643 
3644   // Visitors for the corresponding spaces.
3645   EvacuateNewSpaceVisitor new_space_visitor_;
3646   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
3647       new_to_new_page_visitor_;
3648   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
3649       new_to_old_page_visitor_;
3650   EvacuateOldSpaceVisitor old_space_visitor_;
3651 
3652   // Locally cached collector data.
3653   EvacuationAllocator* local_allocator_;
3654 
3655   // Bookkeeping info.
3656   double duration_;
3657   intptr_t bytes_compacted_;
3658 };
3659 
3660 void Evacuator::EvacuatePage(MemoryChunk* chunk) {
3661   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
3662   DCHECK(chunk->SweepingDone());
3663   intptr_t saved_live_bytes = 0;
3664   double evacuation_time = 0.0;
3665   {
3666     AlwaysAllocateScope always_allocate(heap());
3667     TimedScope timed_scope(&evacuation_time);
3668     RawEvacuatePage(chunk, &saved_live_bytes);
3669   }
3670   ReportCompactionProgress(evacuation_time, saved_live_bytes);
3671   if (FLAG_trace_evacuation) {
3672     PrintIsolate(heap()->isolate(),
3673                  "evacuation[%p]: page=%p new_space=%d "
3674                  "page_evacuation=%d executable=%d contains_age_mark=%d "
3675                  "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
3676                  static_cast<void*>(this), static_cast<void*>(chunk),
3677                  chunk->InNewSpace(),
3678                  chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3679                      chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3680                  chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3681                  chunk->Contains(heap()->new_space()->age_mark()),
3682                  saved_live_bytes, evacuation_time,
3683                  chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
3684   }
3685 }
3686 
3687 void Evacuator::Finalize() {
3688   local_allocator_->Finalize();
3689   if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
3690   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3691   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3692                                        new_to_old_page_visitor_.moved_bytes());
3693   heap()->IncrementSemiSpaceCopiedObjectSize(
3694       new_space_visitor_.semispace_copied_size() +
3695       new_to_new_page_visitor_.moved_bytes());
3696   heap()->IncrementYoungSurvivorsCounter(
3697       new_space_visitor_.promoted_size() +
3698       new_space_visitor_.semispace_copied_size() +
3699       new_to_old_page_visitor_.moved_bytes() +
3700       new_to_new_page_visitor_.moved_bytes());
3701   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3702 }
3703 
3704 class FullEvacuator : public Evacuator {
3705  public:
3706   explicit FullEvacuator(MarkCompactCollector* collector)
3707       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
3708                   AlwaysPromoteYoung::kYes),
3709         record_visitor_(collector, &ephemeron_remembered_set_),
3710         local_allocator_(heap_,
3711                          CompactionSpaceKind::kCompactionSpaceForMarkCompact),
3712         collector_(collector) {}
3713 
3714   GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
3715     return GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY;
3716   }
3717 
3718   GCTracer::Scope::ScopeId GetTracingScope() override {
3719     return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
3720   }
3721 
3722   void Finalize() override {
3723     Evacuator::Finalize();
3724 
3725     for (auto it = ephemeron_remembered_set_.begin();
3726          it != ephemeron_remembered_set_.end(); ++it) {
3727       auto insert_result =
3728           heap()->ephemeron_remembered_set_.insert({it->first, it->second});
3729       if (!insert_result.second) {
3730         // Insertion didn't happen; there was already an item. Merge into it.
3731         auto& set = insert_result.first->second;
3732         for (int entry : it->second) {
3733           set.insert(entry);
3734         }
3735       }
3736     }
3737   }
3738 
3739  protected:
3740   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
3741   EphemeronRememberedSet ephemeron_remembered_set_;
3742   RecordMigratedSlotVisitor record_visitor_;
3743   EvacuationAllocator local_allocator_;
3744 
3745   MarkCompactCollector* collector_;
3746 };
3747 
3748 void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
3749   const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
3750   MarkCompactCollector::NonAtomicMarkingState* marking_state =
3751       collector_->non_atomic_marking_state();
3752   *live_bytes = marking_state->live_bytes(chunk);
3753   TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3754                "FullEvacuator::RawEvacuatePage", "evacuation_mode",
3755                EvacuationModeName(evacuation_mode), "live_bytes", *live_bytes);
3756   HeapObject failed_object;
3757   switch (evacuation_mode) {
3758     case kObjectsNewToOld:
3759       LiveObjectVisitor::VisitBlackObjectsNoFail(
3760           chunk, marking_state, &new_space_visitor_,
3761           LiveObjectVisitor::kClearMarkbits);
3762       break;
3763     case kPageNewToOld:
3764       LiveObjectVisitor::VisitBlackObjectsNoFail(
3765           chunk, marking_state, &new_to_old_page_visitor_,
3766           LiveObjectVisitor::kKeepMarking);
3767       new_to_old_page_visitor_.account_moved_bytes(
3768           marking_state->live_bytes(chunk));
3769       break;
3770     case kPageNewToNew:
3771       LiveObjectVisitor::VisitBlackObjectsNoFail(
3772           chunk, marking_state, &new_to_new_page_visitor_,
3773           LiveObjectVisitor::kKeepMarking);
3774       new_to_new_page_visitor_.account_moved_bytes(
3775           marking_state->live_bytes(chunk));
3776       break;
3777     case kObjectsOldToOld: {
3778       const bool success = LiveObjectVisitor::VisitBlackObjects(
3779           chunk, marking_state, &old_space_visitor_,
3780           LiveObjectVisitor::kClearMarkbits, &failed_object);
3781       if (!success) {
3782         if (FLAG_crash_on_aborted_evacuation) {
3783           heap_->FatalProcessOutOfMemory("FullEvacuator::RawEvacuatePage");
3784         } else {
3785           // Aborted compaction page. Actual processing happens on the main
3786           // thread for simplicity.
3787           collector_->ReportAbortedEvacuationCandidateDueToOOM(
3788               failed_object.address(), static_cast<Page*>(chunk));
3789         }
3790       }
3791       break;
3792     }
3793   }
3794 }
3795 
3796 class PageEvacuationJob : public v8::JobTask {
3797  public:
3798   PageEvacuationJob(
3799       Isolate* isolate, std::vector<std::unique_ptr<Evacuator>>* evacuators,
3800       std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items)
3801       : evacuators_(evacuators),
3802         evacuation_items_(std::move(evacuation_items)),
3803         remaining_evacuation_items_(evacuation_items_.size()),
3804         generator_(evacuation_items_.size()),
3805         tracer_(isolate->heap()->tracer()) {}
3806 
3807   void Run(JobDelegate* delegate) override {
3808     Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
3809     if (delegate->IsJoiningThread()) {
3810       TRACE_GC(tracer_, evacuator->GetTracingScope());
3811       ProcessItems(delegate, evacuator);
3812     } else {
3813       TRACE_GC_EPOCH(tracer_, evacuator->GetBackgroundTracingScope(),
3814                      ThreadKind::kBackground);
3815       ProcessItems(delegate, evacuator);
3816     }
3817   }
3818 
3819   void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
3820     while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
3821       base::Optional<size_t> index = generator_.GetNext();
3822       if (!index) return;
3823       for (size_t i = *index; i < evacuation_items_.size(); ++i) {
3824         auto& work_item = evacuation_items_[i];
3825         if (!work_item.first.TryAcquire()) break;
3826         evacuator->EvacuatePage(work_item.second);
3827         if (remaining_evacuation_items_.fetch_sub(
3828                 1, std::memory_order_relaxed) <= 1) {
3829           return;
3830         }
3831       }
3832     }
3833   }
3834 
3835   size_t GetMaxConcurrency(size_t worker_count) const override {
3836     const size_t kItemsPerWorker = std::max(1, MB / Page::kPageSize);
3837     // Ceiling division to ensure enough workers for all
3838     // |remaining_evacuation_items_|.
3839     const size_t wanted_num_workers =
3840         (remaining_evacuation_items_.load(std::memory_order_relaxed) +
3841          kItemsPerWorker - 1) /
3842         kItemsPerWorker;
3843     return std::min<size_t>(wanted_num_workers, evacuators_->size());
3844   }
3845 
3846  private:
3847   std::vector<std::unique_ptr<Evacuator>>* evacuators_;
3848   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items_;
3849   std::atomic<size_t> remaining_evacuation_items_{0};
3850   IndexGenerator generator_;
3851 
3852   GCTracer* tracer_;
3853 };
3854 
3855 template <class Evacuator, class Collector>
3856 size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
3857     Collector* collector,
3858     std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
3859     MigrationObserver* migration_observer) {
3860   base::Optional<ProfilingMigrationObserver> profiling_observer;
3861   if (isolate()->LogObjectRelocation()) {
3862     profiling_observer.emplace(heap());
3863   }
3864   std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
3865   const int wanted_num_tasks = NumberOfParallelCompactionTasks();
3866   for (int i = 0; i < wanted_num_tasks; i++) {
3867     auto evacuator = std::make_unique<Evacuator>(collector);
3868     if (profiling_observer) {
3869       evacuator->AddObserver(&profiling_observer.value());
3870     }
3871     if (migration_observer) {
3872       evacuator->AddObserver(migration_observer);
3873     }
3874     evacuators.push_back(std::move(evacuator));
3875   }
3876   V8::GetCurrentPlatform()
3877       ->PostJob(v8::TaskPriority::kUserBlocking,
3878                 std::make_unique<PageEvacuationJob>(
3879                     isolate(), &evacuators, std::move(evacuation_items)))
3880       ->Join();
3881   for (auto& evacuator : evacuators) {
3882     evacuator->Finalize();
3883   }
3884   return wanted_num_tasks;
3885 }
3886 
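// Heuristic for promoting an entire new-space page rather than evacuating its
// objects individually: memory reduction must be off, the page must be
// evacuatable, its live bytes must exceed the evacuation threshold, it must
// not contain the age mark (unless young objects are always promoted), and
// the old generation must be able to grow by the page's live bytes.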
3887 bool MarkCompactCollectorBase::ShouldMovePage(
3888     Page* p, intptr_t live_bytes, AlwaysPromoteYoung always_promote_young) {
3889   const bool reduce_memory = heap()->ShouldReduceMemory();
3890   const Address age_mark = heap()->new_space()->age_mark();
3891   return !reduce_memory && !p->NeverEvacuate() &&
3892          (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
3893          (always_promote_young == AlwaysPromoteYoung::kYes ||
3894           !p->Contains(age_mark)) &&
3895          heap()->CanExpandOldGeneration(live_bytes);
3896 }
3897 
3898 namespace {
3899 
3900 void TraceEvacuation(Isolate* isolate, size_t pages_count,
3901                      size_t wanted_num_tasks, size_t live_bytes,
3902                      size_t aborted_pages) {
3903   DCHECK(FLAG_trace_evacuation);
3904   PrintIsolate(
3905       isolate,
3906       "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
3907       "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
3908       " compaction_speed=%.f aborted=%zu\n",
3909       isolate->time_millis_since_init(),
3910       FLAG_parallel_compaction ? "yes" : "no", pages_count, wanted_num_tasks,
3911       V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1, live_bytes,
3912       isolate->heap()->tracer()->CompactionSpeedInBytesPerMillisecond(),
3913       aborted_pages);
3914 }
3915 
3916 }  // namespace
3917 
3918 void MarkCompactCollector::EvacuatePagesInParallel() {
3919   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
3920   intptr_t live_bytes = 0;
3921 
3922   // Evacuation of new space pages cannot be aborted, so it needs to run
3923   // before old space evacuation.
3924   for (Page* page : new_space_evacuation_pages_) {
3925     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
3926     if (live_bytes_on_page == 0) continue;
3927     live_bytes += live_bytes_on_page;
3928     if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
3929       EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3930       DCHECK_EQ(heap()->old_space(), page->owner());
3931       // The move added page->allocated_bytes to the old space, but we are
3932       // going to sweep the page and add page->live_byte_count.
3933       heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
3934                                                   page);
3935     }
3936     evacuation_items.emplace_back(ParallelWorkItem{}, page);
3937   }
3938 
3939   if (!heap()->IsGCWithoutStack()) {
3940     if (!FLAG_compact_with_stack || !FLAG_compact_code_space_with_stack) {
3941       for (Page* page : old_space_evacuation_pages_) {
3942         if (!FLAG_compact_with_stack || page->owner_identity() == CODE_SPACE) {
3943           ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
3944           // Set this flag early on in this case to allow filtering such pages
3945           // below.
3946           page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3947         }
3948       }
3949     }
3950   }
3951 
3952   for (Page* page : old_space_evacuation_pages_) {
3953     if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
3954 
3955     live_bytes += non_atomic_marking_state()->live_bytes(page);
3956     evacuation_items.emplace_back(ParallelWorkItem{}, page);
3957   }
3958 
3959   // Promote young generation large objects.
3960   if (heap()->new_lo_space()) {
3961     IncrementalMarking::NonAtomicMarkingState* marking_state =
3962         heap()->incremental_marking()->non_atomic_marking_state();
3963 
3964     for (auto it = heap()->new_lo_space()->begin();
3965          it != heap()->new_lo_space()->end();) {
3966       LargePage* current = *it;
3967       it++;
3968       HeapObject object = current->GetObject();
3969       DCHECK(!marking_state->IsGrey(object));
3970       if (marking_state->IsBlack(object)) {
3971         heap_->lo_space()->PromoteNewLargeObject(current);
3972         current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
3973         promoted_large_pages_.push_back(current);
3974         evacuation_items.emplace_back(ParallelWorkItem{}, current);
3975       }
3976     }
3977   }
3978 
3979   const size_t pages_count = evacuation_items.size();
3980   size_t wanted_num_tasks = 0;
3981   if (!evacuation_items.empty()) {
3982     TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3983                  "MarkCompactCollector::EvacuatePagesInParallel", "pages",
3984                  evacuation_items.size());
3985 
3986     wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
3987         this, std::move(evacuation_items), nullptr);
3988   }
3989 
3990   const size_t aborted_pages = PostProcessEvacuationCandidates();
3991 
3992   if (FLAG_trace_evacuation) {
3993     TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes,
3994                     aborted_pages);
3995   }
3996 }
3997 
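// Weak-list retainer used after evacuation: follows forwarding addresses left
// behind by object migration so that weak lists end up pointing at the new
// object locations.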
3998 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3999  public:
4000   Object RetainAs(Object object) override {
4001     if (object.IsHeapObject()) {
4002       HeapObject heap_object = HeapObject::cast(object);
4003       MapWord map_word = heap_object.map_word(kRelaxedLoad);
4004       if (map_word.IsForwardingAddress()) {
4005         return map_word.ToForwardingAddress();
4006       }
4007     }
4008     return object;
4009   }
4010 };
4011 
4012 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
4013   EvacuateRecordOnlyVisitor visitor(heap());
4014   LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
4015                                              &visitor,
4016                                              LiveObjectVisitor::kKeepMarking);
4017 }
4018 
4019 template <class Visitor, typename MarkingState>
4020 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
4021                                           MarkingState* marking_state,
4022                                           Visitor* visitor,
4023                                           IterationMode iteration_mode,
4024                                           HeapObject* failed_object) {
4025   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4026                "LiveObjectVisitor::VisitBlackObjects");
4027   for (auto object_and_size :
4028        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
4029     HeapObject const object = object_and_size.first;
4030     if (!visitor->Visit(object, object_and_size.second)) {
4031       if (iteration_mode == kClearMarkbits) {
4032         marking_state->bitmap(chunk)->ClearRange(
4033             chunk->AddressToMarkbitIndex(chunk->area_start()),
4034             chunk->AddressToMarkbitIndex(object.address()));
4035         *failed_object = object;
4036       }
4037       return false;
4038     }
4039   }
4040   if (iteration_mode == kClearMarkbits) {
4041     marking_state->ClearLiveness(chunk);
4042   }
4043   return true;
4044 }
4045 
4046 template <class Visitor, typename MarkingState>
4047 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
4048                                                 MarkingState* marking_state,
4049                                                 Visitor* visitor,
4050                                                 IterationMode iteration_mode) {
4051   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4052                "LiveObjectVisitor::VisitBlackObjectsNoFail");
4053   if (chunk->IsLargePage()) {
4054     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
4055     if (marking_state->IsBlack(object)) {
4056       const bool success = visitor->Visit(object, object.Size());
4057       USE(success);
4058       DCHECK(success);
4059     }
4060   } else {
4061     for (auto object_and_size :
4062          LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
4063       HeapObject const object = object_and_size.first;
4064       DCHECK(marking_state->IsBlack(object));
4065       const bool success = visitor->Visit(object, object_and_size.second);
4066       USE(success);
4067       DCHECK(success);
4068     }
4069   }
4070   if (iteration_mode == kClearMarkbits) {
4071     marking_state->ClearLiveness(chunk);
4072   }
4073 }
4074 
4075 template <class Visitor, typename MarkingState>
4076 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
4077                                                MarkingState* marking_state,
4078                                                Visitor* visitor,
4079                                                IterationMode iteration_mode) {
4080   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4081                "LiveObjectVisitor::VisitGreyObjectsNoFail");
4082   if (chunk->IsLargePage()) {
4083     HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
4084     if (marking_state->IsGrey(object)) {
4085       const bool success = visitor->Visit(object, object.Size());
4086       USE(success);
4087       DCHECK(success);
4088     }
4089   } else {
4090     for (auto object_and_size :
4091          LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
4092       HeapObject const object = object_and_size.first;
4093       DCHECK(marking_state->IsGrey(object));
4094       const bool success = visitor->Visit(object, object_and_size.second);
4095       USE(success);
4096       DCHECK(success);
4097     }
4098   }
4099   if (iteration_mode == kClearMarkbits) {
4100     marking_state->ClearLiveness(chunk);
4101   }
4102 }
4103 
4104 template <typename MarkingState>
4105 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
4106                                            MarkingState* marking_state) {
4107   int new_live_size = 0;
4108   for (auto object_and_size :
4109        LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
4110     new_live_size += object_and_size.second;
4111   }
4112   marking_state->SetLiveBytes(chunk, new_live_size);
4113 }
4114 
4115 void MarkCompactCollector::Evacuate() {
4116   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
4117   base::MutexGuard guard(heap()->relocation_mutex());
4118 
4119   {
4120     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
4121     EvacuatePrologue();
4122   }
4123 
4124   {
4125     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
4126     EvacuationScope evacuation_scope(this);
4127     EvacuatePagesInParallel();
4128   }
4129 
4130   UpdatePointersAfterEvacuation();
4131 
4132   if (heap()->new_space()) {
4133     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
4134     if (!heap()->new_space()->Rebalance()) {
4135       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4136     }
4137   }
4138 
4139   // Give pages that are queued to be freed back to the OS. Note that filtering
4140   // slots only handles old space (for unboxed doubles), and thus map space can
4141   // still contain stale pointers. We only free the chunks after pointer updates
4142   // to still have access to page headers.
4143   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4144 
4145   {
4146     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
4147 
4148     for (Page* p : new_space_evacuation_pages_) {
4149       // Full GCs don't promote pages within new space.
4150       DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
4151       if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4152         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4153         DCHECK_EQ(OLD_SPACE, p->owner_identity());
4154         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
4155       }
4156     }
4157     new_space_evacuation_pages_.clear();
4158 
4159     for (LargePage* p : promoted_large_pages_) {
4160       DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
4161       p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4162     }
4163     promoted_large_pages_.clear();
4164 
4165     for (Page* p : old_space_evacuation_pages_) {
4166       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
4167         sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
4168         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
4169       }
4170     }
4171   }
4172 
4173   {
4174     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
4175     EvacuateEpilogue();
4176   }
4177 
4178 #ifdef VERIFY_HEAP
4179   if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
4180     FullEvacuationVerifier verifier(heap());
4181     verifier.Run();
4182   }
4183 #endif
4184 }
4185 
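// A unit of pointer-updating work that can be claimed and processed in
// parallel by a PointersUpdatingJob.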
4186 class UpdatingItem : public ParallelWorkItem {
4187  public:
4188   virtual ~UpdatingItem() = default;
4189   virtual void Process() = 0;
4190 };
4191 
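// Parallel job that drains a list of UpdatingItems. Items are claimed via
// TryAcquire and distributed using an IndexGenerator; concurrency is capped
// at kMaxPointerUpdateTasks, or a single task when parallel pointer updating
// is disabled.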
4192 class PointersUpdatingJob : public v8::JobTask {
4193  public:
4194   explicit PointersUpdatingJob(
4195       Isolate* isolate,
4196       std::vector<std::unique_ptr<UpdatingItem>> updating_items,
4197       GCTracer::Scope::ScopeId scope, GCTracer::Scope::ScopeId background_scope)
4198       : updating_items_(std::move(updating_items)),
4199         remaining_updating_items_(updating_items_.size()),
4200         generator_(updating_items_.size()),
4201         tracer_(isolate->heap()->tracer()),
4202         scope_(scope),
4203         background_scope_(background_scope) {}
4204 
4205   void Run(JobDelegate* delegate) override {
4206     if (delegate->IsJoiningThread()) {
4207       TRACE_GC(tracer_, scope_);
4208       UpdatePointers(delegate);
4209     } else {
4210       TRACE_GC_EPOCH(tracer_, background_scope_, ThreadKind::kBackground);
4211       UpdatePointers(delegate);
4212     }
4213   }
4214 
4215   void UpdatePointers(JobDelegate* delegate) {
4216     while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
4217       base::Optional<size_t> index = generator_.GetNext();
4218       if (!index) return;
4219       for (size_t i = *index; i < updating_items_.size(); ++i) {
4220         auto& work_item = updating_items_[i];
4221         if (!work_item->TryAcquire()) break;
4222         work_item->Process();
4223         if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
4224             1) {
4225           return;
4226         }
4227       }
4228     }
4229   }
4230 
4231   size_t GetMaxConcurrency(size_t worker_count) const override {
4232     size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
4233     if (!FLAG_parallel_pointer_update) return items > 0;
4234     const size_t kMaxPointerUpdateTasks = 8;
4235     size_t max_concurrency = std::min<size_t>(kMaxPointerUpdateTasks, items);
4236     DCHECK_IMPLIES(items > 0, max_concurrency > 0);
4237     return max_concurrency;
4238   }
4239 
4240  private:
4241   std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
4242   std::atomic<size_t> remaining_updating_items_{0};
4243   IndexGenerator generator_;
4244 
4245   GCTracer* tracer_;
4246   GCTracer::Scope::ScopeId scope_;
4247   GCTracer::Scope::ScopeId background_scope_;
4248 };
4249 
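// Updates pointers inside a to-space page. Pages promoted within new space
// (PAGE_NEW_NEW_PROMOTION) may contain garbage and are therefore iterated via
// markbits; all other pages are iterated linearly from start_ to end_.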
4250 template <typename MarkingState>
4251 class ToSpaceUpdatingItem : public UpdatingItem {
4252  public:
4253   explicit ToSpaceUpdatingItem(Heap* heap, MemoryChunk* chunk, Address start,
4254                                Address end, MarkingState* marking_state)
4255       : heap_(heap),
4256         chunk_(chunk),
4257         start_(start),
4258         end_(end),
4259         marking_state_(marking_state) {}
4260   ~ToSpaceUpdatingItem() override = default;
4261 
4262   void Process() override {
4263     if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
4264       // New->new promoted pages contain garbage so they require iteration using
4265       // markbits.
4266       ProcessVisitLive();
4267     } else {
4268       ProcessVisitAll();
4269     }
4270   }
4271 
4272  private:
4273   void ProcessVisitAll() {
4274     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4275                  "ToSpaceUpdatingItem::ProcessVisitAll");
4276     PointersUpdatingVisitor visitor(heap_);
4277     for (Address cur = start_; cur < end_;) {
4278       HeapObject object = HeapObject::FromAddress(cur);
4279       Map map = object.map(visitor.cage_base());
4280       int size = object.SizeFromMap(map);
4281       object.IterateBodyFast(map, size, &visitor);
4282       cur += size;
4283     }
4284   }
4285 
4286   void ProcessVisitLive() {
4287     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4288                  "ToSpaceUpdatingItem::ProcessVisitLive");
4289     // For young generation evacuations we want to visit grey objects; for
4290     // full MC we need to visit black objects.
4291     PointersUpdatingVisitor visitor(heap_);
4292     for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
4293              chunk_, marking_state_->bitmap(chunk_))) {
4294       object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
4295     }
4296   }
4297 
4298   Heap* heap_;
4299   MemoryChunk* chunk_;
4300   Address start_;
4301   Address end_;
4302   MarkingState* marking_state_;
4303 };
4304 
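// Updates the pointers recorded in a chunk's remembered sets: OLD_TO_NEW slots
// are checked against liveness and forwarding information and, when the
// updating mode is ALL, the OLD_TO_OLD, OLD_TO_CODE and typed slot sets are
// updated and released as well.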
4305 template <typename MarkingState, GarbageCollector collector>
4306 class RememberedSetUpdatingItem : public UpdatingItem {
4307  public:
4308   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
4309                                      MemoryChunk* chunk,
4310                                      RememberedSetUpdatingMode updating_mode)
4311       : heap_(heap),
4312         marking_state_(marking_state),
4313         chunk_(chunk),
4314         updating_mode_(updating_mode) {}
4315   ~RememberedSetUpdatingItem() override = default;
4316 
4317   void Process() override {
4318     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4319                  "RememberedSetUpdatingItem::Process");
4320     base::MutexGuard guard(chunk_->mutex());
4321     CodePageMemoryModificationScope memory_modification_scope(chunk_);
4322     UpdateUntypedPointers();
4323     UpdateTypedPointers();
4324   }
4325 
4326  private:
4327   template <typename TSlot>
4328   inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
4329     static_assert(
4330         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
4331             std::is_same<TSlot, MaybeObjectSlot>::value,
4332         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
4333     using THeapObjectSlot = typename TSlot::THeapObjectSlot;
4334     HeapObject heap_object;
4335     if (!(*slot).GetHeapObject(&heap_object)) {
4336       return REMOVE_SLOT;
4337     }
4338     if (Heap::InFromPage(heap_object)) {
4339       MapWord map_word = heap_object.map_word(kRelaxedLoad);
4340       if (map_word.IsForwardingAddress()) {
4341         HeapObjectReference::Update(THeapObjectSlot(slot),
4342                                     map_word.ToForwardingAddress());
4343       }
4344       bool success = (*slot).GetHeapObject(&heap_object);
4345       USE(success);
4346       DCHECK(success);
4347       // If the object was in from space before executing the callback and
4348       // is in to space afterwards, the object is still live.
4349       // Unfortunately, we do not know about the slot itself: it could be
4350       // inside a just-freed free-space object.
4351       if (Heap::InToPage(heap_object)) {
4352         return KEEP_SLOT;
4353       }
4354     } else if (Heap::InToPage(heap_object)) {
4355       // Slots can point to "to" space if the page has been moved, or if the
4356       // slot has been recorded multiple times in the remembered set, or
4357       // if the slot was already updated during old->old updating.
4358       // In case the page has been moved, check markbits to determine liveness
4359       // of the slot. In the other case, the slot can just be kept.
4360       if (Page::FromHeapObject(heap_object)
4361               ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
4362         // IsBlackOrGrey is required because objects are marked as grey for
4363         // the young generation collector while they are black for the full
4364         // MC.
4365         if (marking_state_->IsBlackOrGrey(heap_object)) {
4366           return KEEP_SLOT;
4367         } else {
4368           return REMOVE_SLOT;
4369         }
4370       }
4371       return KEEP_SLOT;
4372     } else {
4373       DCHECK(!Heap::InYoungGeneration(heap_object));
4374     }
4375     return REMOVE_SLOT;
4376   }
4377 
4378   void UpdateUntypedPointers() {
4379     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
4380       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
4381       int slots = RememberedSet<OLD_TO_NEW>::Iterate(
4382           chunk_,
4383           [this, &filter](MaybeObjectSlot slot) {
4384             if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
4385             return CheckAndUpdateOldToNewSlot(slot);
4386           },
4387           SlotSet::FREE_EMPTY_BUCKETS);
4388 
4389       DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
4390 
4391       if (slots == 0) {
4392         chunk_->ReleaseSlotSet<OLD_TO_NEW>();
4393       }
4394     }
4395 
4396     if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
4397       // The invalidated slots are not needed after old-to-new slots were
4398       // processed.
4399       chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
4400     }
4401 
4402     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4403         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
4404       InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
4405       PtrComprCageBase cage_base = heap_->isolate();
4406       RememberedSet<OLD_TO_OLD>::Iterate(
4407           chunk_,
4408           [&filter, cage_base](MaybeObjectSlot slot) {
4409             if (filter.IsValid(slot.address())) {
4410               UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4411             }
4412             // Always keep slot since all slots are dropped at once after
4413             // iteration.
4414             return KEEP_SLOT;
4415           },
4416           SlotSet::KEEP_EMPTY_BUCKETS);
4417       chunk_->ReleaseSlotSet<OLD_TO_OLD>();
4418     }
4419     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4420         chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
4421       // The invalidated slots are not needed after old-to-old slots were
4422       // processed.
4423       chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
4424     }
4425     if (V8_EXTERNAL_CODE_SPACE_BOOL) {
4426       if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4427           (chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
4428            nullptr)) {
4429         PtrComprCageBase cage_base = heap_->isolate();
4430 #ifdef V8_EXTERNAL_CODE_SPACE
4431         PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
4432 #else
4433         PtrComprCageBase code_cage_base = cage_base;
4434 #endif
4435         RememberedSet<OLD_TO_CODE>::Iterate(
4436             chunk_,
4437             [=](MaybeObjectSlot slot) {
4438               HeapObject host = HeapObject::FromAddress(
4439                   slot.address() - CodeDataContainer::kCodeOffset);
4440               DCHECK(host.IsCodeDataContainer(cage_base));
4441               return UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(
4442                   host, cage_base, code_cage_base,
4443                   CodeObjectSlot(slot.address()));
4444             },
4445             SlotSet::FREE_EMPTY_BUCKETS);
4446         chunk_->ReleaseSlotSet<OLD_TO_CODE>();
4447       }
4448       // The invalidated slots are not needed after old-to-code slots were
4449       // processed, but since there are no invalidated OLD_TO_CODE slots,
4450       // there's nothing to clear.
4451     }
4452   }
4453 
4454   void UpdateTypedPointers() {
4455     if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
4456         nullptr) {
4457       CHECK_NE(chunk_->owner(), heap_->map_space());
4458       const auto check_and_update_old_to_new_slot_fn =
4459           [this](FullMaybeObjectSlot slot) {
4460             return CheckAndUpdateOldToNewSlot(slot);
4461           };
4462       RememberedSet<OLD_TO_NEW>::IterateTyped(
4463           chunk_, [=](SlotType slot_type, Address slot) {
4464             return UpdateTypedSlotHelper::UpdateTypedSlot(
4465                 heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
4466           });
4467     }
4468     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4469         (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
4470          nullptr)) {
4471       CHECK_NE(chunk_->owner(), heap_->map_space());
4472       RememberedSet<OLD_TO_OLD>::IterateTyped(chunk_, [=](SlotType slot_type,
4473                                                           Address slot) {
4474         // Using UpdateStrongSlot is OK here, because there are no weak
4475         // typed slots.
4476         PtrComprCageBase cage_base = heap_->isolate();
4477         UpdateTypedSlotHelper::UpdateTypedSlot(
4478             heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
4479               return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4480             });
4481         // Always keep slot since all slots are dropped at once after iteration.
4482         return KEEP_SLOT;
4483       });
4484       chunk_->ReleaseTypedSlotSet<OLD_TO_OLD>();
4485     }
4486   }
4487 
4488   Heap* heap_;
4489   MarkingState* marking_state_;
4490   MemoryChunk* chunk_;
4491   RememberedSetUpdatingMode updating_mode_;
4492 };
4493 
4494 std::unique_ptr<UpdatingItem>
4495 MarkCompactCollector::CreateRememberedSetUpdatingItem(
4496     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4497   return std::make_unique<RememberedSetUpdatingItem<
4498       NonAtomicMarkingState, GarbageCollector::MARK_COMPACTOR>>(
4499       heap(), non_atomic_marking_state(), chunk, updating_mode);
4500 }
4501 
4502 template <typename IterateableSpace>
4503 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
4504     std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
4505     RememberedSetUpdatingMode mode) {
4506   int pages = 0;
4507   for (MemoryChunk* chunk : *space) {
4508     const bool contains_old_to_old_slots =
4509         chunk->slot_set<OLD_TO_OLD>() != nullptr ||
4510         chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
4511     const bool contains_old_to_code_slots =
4512         V8_EXTERNAL_CODE_SPACE_BOOL &&
4513         chunk->slot_set<OLD_TO_CODE>() != nullptr;
4514     const bool contains_old_to_new_slots =
4515         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
4516         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
4517     const bool contains_old_to_old_invalidated_slots =
4518         chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
4519     const bool contains_old_to_new_invalidated_slots =
4520         chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
4521     if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
4522         !contains_old_to_old_invalidated_slots &&
4523         !contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
4524       continue;
4525     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
4526         contains_old_to_old_invalidated_slots ||
4527         contains_old_to_new_invalidated_slots) {
4528       items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
4529       pages++;
4530     }
4531   }
4532   return pages;
4533 }
4534 
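// Updates the heap's ephemeron_remembered_set_ after evacuation: entries for
// tables that moved are dropped (the migrated table re-records its entries),
// surviving keys are forwarded, and indices whose keys are no longer in the
// young generation are removed.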
4535 class EphemeronTableUpdatingItem : public UpdatingItem {
4536  public:
4537   enum EvacuationState { kRegular, kAborted };
4538 
4539   explicit EphemeronTableUpdatingItem(Heap* heap) : heap_(heap) {}
4540   ~EphemeronTableUpdatingItem() override = default;
4541 
4542   void Process() override {
4543     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4544                  "EphemeronTableUpdatingItem::Process");
4545     PtrComprCageBase cage_base(heap_->isolate());
4546 
4547     for (auto it = heap_->ephemeron_remembered_set_.begin();
4548          it != heap_->ephemeron_remembered_set_.end();) {
4549       EphemeronHashTable table = it->first;
4550       auto& indices = it->second;
4551       if (table.map_word(cage_base, kRelaxedLoad).IsForwardingAddress()) {
4552         // The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
4553         // inserts entries for the moved table into ephemeron_remembered_set_.
4554         it = heap_->ephemeron_remembered_set_.erase(it);
4555         continue;
4556       }
4557       DCHECK(table.map(cage_base).IsMap(cage_base));
4558       DCHECK(table.IsEphemeronHashTable(cage_base));
4559       for (auto iti = indices.begin(); iti != indices.end();) {
4560         // EphemeronHashTable keys must be heap objects.
4561         HeapObjectSlot key_slot(table.RawFieldOfElementAt(
4562             EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
4563         HeapObject key = key_slot.ToHeapObject();
4564         MapWord map_word = key.map_word(cage_base, kRelaxedLoad);
4565         if (map_word.IsForwardingAddress()) {
4566           key = map_word.ToForwardingAddress();
4567           key_slot.StoreHeapObject(key);
4568         }
4569         if (!heap_->InYoungGeneration(key)) {
4570           iti = indices.erase(iti);
4571         } else {
4572           ++iti;
4573         }
4574       }
4575       if (indices.size() == 0) {
4576         it = heap_->ephemeron_remembered_set_.erase(it);
4577       } else {
4578         ++it;
4579       }
4580     }
4581   }
4582 
4583  private:
4584   Heap* const heap_;
4585 };
4586 
4587 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
4588   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
4589 
4590   {
4591     TRACE_GC(heap()->tracer(),
4592              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
4593     // The external string table is updated at the end.
4594     PointersUpdatingVisitor updating_visitor(heap());
4595     heap_->IterateRootsIncludingClients(
4596         &updating_visitor,
4597         base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
4598   }
4599 
4600   {
4601     TRACE_GC(heap()->tracer(),
4602              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
4603     UpdatePointersInClientHeaps();
4604   }
4605 
4606   {
4607     TRACE_GC(heap()->tracer(),
4608              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
4609     std::vector<std::unique_ptr<UpdatingItem>> updating_items;
4610 
4611     CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
4612                                       RememberedSetUpdatingMode::ALL);
4613     CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
4614                                       RememberedSetUpdatingMode::ALL);
4615     CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
4616                                       RememberedSetUpdatingMode::ALL);
4617     CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
4618                                       RememberedSetUpdatingMode::ALL);
4619     if (heap()->map_space()) {
4620       CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
4621                                         RememberedSetUpdatingMode::ALL);
4622     }
4623 
4624     // Iterating to space may require a valid body descriptor for e.g.
4625     // WasmStruct which races with updating a slot in Map. Since to space is
4626     // empty after a full GC, such races can't happen.
4627     DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
4628 
4629     updating_items.push_back(
4630         std::make_unique<EphemeronTableUpdatingItem>(heap()));
4631 
4632     V8::GetCurrentPlatform()
4633         ->PostJob(v8::TaskPriority::kUserBlocking,
4634                   std::make_unique<PointersUpdatingJob>(
4635                       isolate(), std::move(updating_items),
4636                       GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
4637                       GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
4638         ->Join();
4639   }
4640 
4641   {
4642     TRACE_GC(heap()->tracer(),
4643              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4644     // Update pointers from external string table.
4645     heap_->UpdateReferencesInExternalStringTable(
4646         &UpdateReferenceInExternalStringTableEntry);
4647 
4648     EvacuationWeakObjectRetainer evacuation_object_retainer;
4649     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4650   }
4651 }
4652 
4653 void MarkCompactCollector::UpdatePointersInClientHeaps() {
4654   if (!isolate()->is_shared()) return;
4655 
4656   isolate()->global_safepoint()->IterateClientIsolates(
4657       [this](Isolate* client) { UpdatePointersInClientHeap(client); });
4658 }
4659 
4660 void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
4661   PtrComprCageBase cage_base(client);
4662   MemoryChunkIterator chunk_iterator(client->heap());
4663 
4664   while (chunk_iterator.HasNext()) {
4665     MemoryChunk* chunk = chunk_iterator.Next();
4666     CodePageMemoryModificationScope unprotect_code_page(chunk);
4667 
4668     RememberedSet<OLD_TO_SHARED>::Iterate(
4669         chunk,
4670         [cage_base](MaybeObjectSlot slot) {
4671           return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4672         },
4673         SlotSet::KEEP_EMPTY_BUCKETS);
4674 
4675     chunk->ReleaseSlotSet<OLD_TO_SHARED>();
4676 
4677     RememberedSet<OLD_TO_SHARED>::IterateTyped(
4678         chunk, [this](SlotType slot_type, Address slot) {
4679           // Using UpdateStrongSlot is OK here, because there are no weak
4680           // typed slots.
4681           PtrComprCageBase cage_base = heap_->isolate();
4682           return UpdateTypedSlotHelper::UpdateTypedSlot(
4683               heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
4684                 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base,
4685                                                                 slot);
4686               });
4687         });
4688 
4689     chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
4690   }
4691 
4692 #ifdef VERIFY_HEAP
4693   if (FLAG_verify_heap) {
4694     ClientHeapVerifier verifier_visitor(client->heap());
4695 
4696     HeapObjectIterator iterator(client->heap(),
4697                                 HeapObjectIterator::kNoFiltering);
4698     for (HeapObject obj = iterator.Next(); !obj.is_null();
4699          obj = iterator.Next()) {
4700       obj.IterateFast(cage_base, &verifier_visitor);
4701     }
4702   }
4703 #endif  // VERIFY_HEAP
4704 }
4705 
4706 void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
4707     Address failed_start, Page* page) {
4708   base::MutexGuard guard(&mutex_);
4709   aborted_evacuation_candidates_due_to_oom_.push_back(
4710       std::make_pair(failed_start, page));
4711 }
4712 
4713 void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
4714     Address failed_start, Page* page) {
4715   base::MutexGuard guard(&mutex_);
4716   aborted_evacuation_candidates_due_to_flags_.push_back(
4717       std::make_pair(failed_start, page));
4718 }
4719 
4720 namespace {
4721 
4722 void ReRecordPage(
4723     Heap* heap,
4724     v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
4725     Address failed_start, Page* page) {
4726   page->SetFlag(Page::COMPACTION_WAS_ABORTED);
4727   // Aborted compaction page. We have to record slots here, since we
4728   // might not have recorded them in the first place.
4729 
4730   // Remove outdated slots.
4731   RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
4732                                          SlotSet::FREE_EMPTY_BUCKETS);
4733   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
4734                                               failed_start);
4735 
4736   // Remove invalidated slots.
4737   if (failed_start > page->area_start()) {
4738     InvalidatedSlotsCleanup old_to_new_cleanup =
4739         InvalidatedSlotsCleanup::OldToNew(page);
4740     old_to_new_cleanup.Free(page->area_start(), failed_start);
4741   }
4742 
4743   // Recompute live bytes.
4744   LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
4745   // Re-record slots.
4746   EvacuateRecordOnlyVisitor record_visitor(heap);
4747   LiveObjectVisitor::VisitBlackObjectsNoFail(
4748       page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
4749   // Array buffers will be processed during pointer updating.
4750 }
4751 
4752 }  // namespace
4753 
4754 size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
4755   CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
4756                 aborted_evacuation_candidates_due_to_oom_.empty());
4757   for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
4758     ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
4759                  start_and_page.second);
4760   }
4761   for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
4762     ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
4763                  start_and_page.second);
4764   }
4765   const size_t aborted_pages =
4766       aborted_evacuation_candidates_due_to_oom_.size() +
4767       aborted_evacuation_candidates_due_to_flags_.size();
4768   size_t aborted_pages_verified = 0;
4769   for (Page* p : old_space_evacuation_pages_) {
4770     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
4771       // After clearing the evacuation candidate flag the page is again in a
4772       // regular state.
4773       p->ClearEvacuationCandidate();
4774       aborted_pages_verified++;
4775     } else {
4776       DCHECK(p->IsEvacuationCandidate());
4777       DCHECK(p->SweepingDone());
4778       p->owner()->memory_chunk_list().Remove(p);
4779     }
4780   }
4781   DCHECK_EQ(aborted_pages_verified, aborted_pages);
4782   USE(aborted_pages_verified);
4783   return aborted_pages;
4784 }
4785 
4786 void MarkCompactCollector::ReleaseEvacuationCandidates() {
4787   for (Page* p : old_space_evacuation_pages_) {
4788     if (!p->IsEvacuationCandidate()) continue;
4789     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
4790     non_atomic_marking_state()->SetLiveBytes(p, 0);
4791     CHECK(p->SweepingDone());
4792     space->ReleasePage(p);
4793   }
4794   old_space_evacuation_pages_.clear();
4795   compacting_ = false;
4796 }
4797 
4798 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
4799   space->ClearAllocatorState();
4800 
4801   int will_be_swept = 0;
4802   bool unused_page_present = false;
4803 
4804   // Loop needs to support deletion if live bytes == 0 for a page.
4805   for (auto it = space->begin(); it != space->end();) {
4806     Page* p = *(it++);
4807     DCHECK(p->SweepingDone());
4808 
4809     if (p->IsEvacuationCandidate()) {
4810       // Will be processed in Evacuate.
4811       DCHECK(!evacuation_candidates_.empty());
4812       continue;
4813     }
4814 
4815     // One unused page is kept; all further unused pages are released instead of being swept.
4816     if (non_atomic_marking_state()->live_bytes(p) == 0) {
4817       if (unused_page_present) {
4818         if (FLAG_gc_verbose) {
4819           PrintIsolate(isolate(), "sweeping: released page: %p",
4820                        static_cast<void*>(p));
4821         }
4822         space->memory_chunk_list().Remove(p);
4823         space->ReleasePage(p);
4824         continue;
4825       }
4826       unused_page_present = true;
4827     }
4828 
4829     sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
4830     will_be_swept++;
4831   }
4832 
4833   if (FLAG_gc_verbose) {
4834     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
4835                  space->name(), will_be_swept);
4836   }
4837 }
4838 
4839 void MarkCompactCollector::StartSweepSpaces() {
4840   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4841 #ifdef DEBUG
4842   state_ = SWEEP_SPACES;
4843 #endif
4844 
4845   {
4846     {
4847       GCTracer::Scope sweep_scope(
4848           heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
4849       StartSweepSpace(heap()->old_space());
4850     }
4851     {
4852       GCTracer::Scope sweep_scope(
4853           heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
4854       StartSweepSpace(heap()->code_space());
4855     }
4856     if (heap()->map_space()) {
4857       GCTracer::Scope sweep_scope(
4858           heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
4859       StartSweepSpace(heap()->map_space());
4860     }
4861     sweeper()->StartSweeping();
4862   }
4863 }
4864 
4865 namespace {
4866 
4867 #ifdef VERIFY_HEAP
4868 
4869 class YoungGenerationMarkingVerifier : public MarkingVerifier {
4870  public:
4871   explicit YoungGenerationMarkingVerifier(Heap* heap)
4872       : MarkingVerifier(heap),
4873         marking_state_(
4874             heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
4875 
4876   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
4877       const MemoryChunk* chunk) override {
4878     return marking_state_->bitmap(chunk);
4879   }
4880 
4881   bool IsMarked(HeapObject object) override {
4882     return marking_state_->IsGrey(object);
4883   }
4884 
4885   bool IsBlackOrGrey(HeapObject object) override {
4886     return marking_state_->IsBlackOrGrey(object);
4887   }
4888 
4889   void Run() override {
4890     VerifyRoots();
4891     VerifyMarking(heap_->new_space());
4892   }
4893 
4894  protected:
4895   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
4896 
4897   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4898     VerifyPointersImpl(start, end);
4899   }
4900 
4901   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4902     VerifyPointersImpl(start, end);
4903   }
4904   void VerifyCodePointer(CodeObjectSlot slot) override {
4905     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4906     // Code slots never appear in new space because CodeDataContainers, the
4907     // only objects that can contain code pointers, are always allocated in
4908     // the old space.
4909     UNREACHABLE();
4910   }
4911 
4912   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4913     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4914     VerifyHeapObjectImpl(target);
4915   }
4916   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4917     VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
4918   }
4919   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4920     VerifyPointersImpl(start, end);
4921   }
4922 
4923  private:
4924   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4925     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
4926   }
4927 
4928   template <typename TSlot>
4929   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
4930     PtrComprCageBase cage_base =
4931         GetPtrComprCageBaseFromOnHeapAddress(start.address());
4932     for (TSlot slot = start; slot < end; ++slot) {
4933       typename TSlot::TObject object = slot.load(cage_base);
4934       HeapObject heap_object;
4935       // Minor MC treats weak references as strong.
4936       if (object.GetHeapObject(&heap_object)) {
4937         VerifyHeapObjectImpl(heap_object);
4938       }
4939     }
4940   }
4941 
4942   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4943 };
4944 
4945 class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
4946  public:
4947   explicit YoungGenerationEvacuationVerifier(Heap* heap)
4948       : EvacuationVerifier(heap) {}
4949 
4950   void Run() override {
4951     DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
4952     VerifyRoots();
4953     VerifyEvacuation(heap_->new_space());
4954     VerifyEvacuation(heap_->old_space());
4955     VerifyEvacuation(heap_->code_space());
4956     if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
4957   }
4958 
4959  protected:
4960   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4961     CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
4962                   Heap::InToPage(heap_object));
4963   }
4964 
4965   template <typename TSlot>
4966   void VerifyPointersImpl(TSlot start, TSlot end) {
4967     for (TSlot current = start; current < end; ++current) {
4968       typename TSlot::TObject object = current.load(cage_base());
4969       HeapObject heap_object;
4970       if (object.GetHeapObject(&heap_object)) {
4971         VerifyHeapObjectImpl(heap_object);
4972       }
4973     }
4974   }
4975   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
4976   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4977     VerifyPointersImpl(start, end);
4978   }
4979   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4980     VerifyPointersImpl(start, end);
4981   }
4982   void VerifyCodePointer(CodeObjectSlot slot) override {
4983     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4984     Object maybe_code = slot.load(code_cage_base());
4985     HeapObject code;
4986     // The slot might contain a smi during CodeDataContainer creation, so skip it.
4987     if (maybe_code.GetHeapObject(&code)) {
4988       VerifyHeapObjectImpl(code);
4989     }
4990   }
4991   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4992     Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4993     VerifyHeapObjectImpl(target);
4994   }
4995   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4996     VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
4997   }
4998   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4999     VerifyPointersImpl(start, end);
5000   }
5001 };
5002 
5003 #endif  // VERIFY_HEAP
5004 
5005 bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
5006   DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
5007   return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
5008                                              ->non_atomic_marking_state()
5009                                              ->IsGrey(HeapObject::cast(*p));
5010 }
5011 
5012 }  // namespace
5013 
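// Marking visitor for the minor (young generation) collector: referenced
// objects that live in the young generation are marked white->grey and pushed
// onto the marking worklist. Weak references are treated as strong.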
5014 class YoungGenerationMarkingVisitor final
5015     : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
5016  public:
5017   YoungGenerationMarkingVisitor(
5018       Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
5019       MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local)
5020       : NewSpaceVisitor(isolate),
5021         worklist_local_(worklist_local),
5022         marking_state_(marking_state) {}
5023 
5024   V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
5025                                ObjectSlot end) final {
5026     VisitPointersImpl(host, start, end);
5027   }
5028 
5029   V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
5030                                MaybeObjectSlot end) final {
5031     VisitPointersImpl(host, start, end);
5032   }
5033 
5034   V8_INLINE void VisitCodePointer(HeapObject host,
5035                                   CodeObjectSlot slot) override {
5036     CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
5037     // Code slots never appear in new space because CodeDataContainers, the
5038     // only objects that can contain code pointers, are always allocated in
5039     // the old space.
5040     UNREACHABLE();
5041   }
5042 
5043   V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
5044     VisitPointerImpl(host, slot);
5045   }
5046 
5047   V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
5048     VisitPointerImpl(host, slot);
5049   }
5050 
5051   V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
5052     // Code objects are not expected in new space.
5053     UNREACHABLE();
5054   }
5055 
5056   V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
5057     // Code objects are not expected in new space.
5058     UNREACHABLE();
5059   }
5060 
5061   V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
5062     object.YoungMarkExtension();
5063     int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
5064     JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
5065     return size;
5066   }
5067 
5068  private:
5069   template <typename TSlot>
5070   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
5071     for (TSlot slot = start; slot < end; ++slot) {
5072       VisitPointer(host, slot);
5073     }
5074   }
5075 
5076   template <typename TSlot>
5077   V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
5078     typename TSlot::TObject target = *slot;
5079     if (Heap::InYoungGeneration(target)) {
5080       // Treat weak references as strong.
5081       // TODO(marja): Proper weakness handling for minor-mcs.
5082       HeapObject target_object = target.GetHeapObject();
5083       MarkObjectViaMarkingWorklist(target_object);
5084     }
5085   }
5086 
5087   inline void MarkObjectViaMarkingWorklist(HeapObject object) {
5088     if (marking_state_->WhiteToGrey(object)) {
5089       // Marking deque overflow is unsupported for the young generation.
5090       worklist_local_->Push(object);
5091     }
5092   }
5093 
5094   MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local_;
5095   MinorMarkCompactCollector::MarkingState* marking_state_;
5096 };
5097 
5098 void MinorMarkCompactCollector::SetUp() {}
5099 
5100 void MinorMarkCompactCollector::TearDown() {}
5101 
5102 // static
5103 constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
5104 
5105 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
5106     : MarkCompactCollectorBase(heap),
5107       worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
5108       main_thread_worklist_local_(worklist_),
5109       marking_state_(heap->isolate()),
5110       non_atomic_marking_state_(heap->isolate()),
5111       main_marking_visitor_(new YoungGenerationMarkingVisitor(
5112           heap->isolate(), marking_state(), &main_thread_worklist_local_)),
5113       page_parallel_job_semaphore_(0) {}
5114 
5115 MinorMarkCompactCollector::~MinorMarkCompactCollector() {
5116   delete worklist_;
5117   delete main_marking_visitor_;
5118 }
5119 
5120 void MinorMarkCompactCollector::CleanupPromotedPages() {
5121   for (Page* p : promoted_pages_) {
5122     p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
5123     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
5124     non_atomic_marking_state()->ClearLiveness(p);
5125   }
5126   promoted_pages_.clear();
5127 
5128   for (LargePage* p : promoted_large_pages_) {
5129     p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
5130   }
5131   promoted_large_pages_.clear();
5132 }
5133 
5134 void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
5135   heap_->array_buffer_sweeper()->RequestSweep(
5136       ArrayBufferSweeper::SweepingType::kYoung);
5137 }
5138 
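// Keeps the full collector's incremental marking consistent while objects
// are moved during a young-generation GC by transferring the mark bit color
// from the source to the destination object.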
5139 class YoungGenerationMigrationObserver final : public MigrationObserver {
5140  public:
5141   YoungGenerationMigrationObserver(Heap* heap,
5142                                    MarkCompactCollector* mark_compact_collector)
5143       : MigrationObserver(heap),
5144         mark_compact_collector_(mark_compact_collector) {}
5145 
5146   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
5147                    int size) final {
5148     // Migrate color to old generation marking in case the object survived young
5149     // generation garbage collection.
5150     if (heap_->incremental_marking()->IsMarking()) {
5151       DCHECK(
5152           heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
5153       heap_->incremental_marking()->TransferColor(src, dst);
5154     }
5155   }
5156 
5157  protected:
5158   base::Mutex mutex_;
5159   MarkCompactCollector* mark_compact_collector_;
5160 };
5161 
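// Records remembered-set entries for slots in objects migrated by the
// young-generation evacuator. OLD_TO_OLD (and OLD_TO_CODE) slots are only
// recorded for host objects the full collector considers live.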
5162 class YoungGenerationRecordMigratedSlotVisitor final
5163     : public RecordMigratedSlotVisitor {
5164  public:
5165   explicit YoungGenerationRecordMigratedSlotVisitor(
5166       MarkCompactCollector* collector)
5167       : RecordMigratedSlotVisitor(collector, nullptr) {}
5168 
5169   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
5170   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
5171     UNREACHABLE();
5172   }
5173 
5174   void MarkArrayBufferExtensionPromoted(HeapObject object) final {
5175     if (!object.IsJSArrayBuffer()) return;
5176     JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
5177   }
5178 
5179  private:
5180   // Only record slots for host objects that are considered live by the full
5181   // collector.
5182   inline bool IsLive(HeapObject object) {
5183     return collector_->non_atomic_marking_state()->IsBlack(object);
5184   }
5185 
5186   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
5187                                  Address slot) final {
5188     if (value->IsStrongOrWeak()) {
5189       BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
5190       if (p->InYoungGeneration()) {
5191         DCHECK_IMPLIES(
5192             p->IsToPage(),
5193             p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
5194         MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
5195         DCHECK(chunk->SweepingDone());
5196         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
5197       } else if (p->IsEvacuationCandidate() && IsLive(host)) {
5198         if (V8_EXTERNAL_CODE_SPACE_BOOL &&
5199             p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
5200           RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
5201               MemoryChunk::FromHeapObject(host), slot);
5202         } else {
5203           RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
5204               MemoryChunk::FromHeapObject(host), slot);
5205         }
5206       }
5207     }
5208   }
5209 };
5210 
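// Fixes up pointers that became stale during evacuation: builds updating
// items for to-space pages and for the OLD_TO_NEW remembered sets of the
// old, code and large-object spaces, visits young roots, runs a parallel
// PointersUpdatingJob, and finally updates weak list roots and the young
// external string table.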
5211 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
5212   TRACE_GC(heap()->tracer(),
5213            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
5214 
5215   PointersUpdatingVisitor updating_visitor(heap());
5216   std::vector<std::unique_ptr<UpdatingItem>> updating_items;
5217 
5218   // Create batches of global handles.
5219   CollectToSpaceUpdatingItems(&updating_items);
5220   CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
5221                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5222   CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
5223                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5224   if (heap()->map_space()) {
5225     CollectRememberedSetUpdatingItems(
5226         &updating_items, heap()->map_space(),
5227         RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5228   }
5229   CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
5230                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5231   CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
5232                                     RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5233 
5234   {
5235     TRACE_GC(heap()->tracer(),
5236              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
5237     heap()->IterateRoots(&updating_visitor,
5238                          base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
5239                                                  SkipRoot::kOldGeneration});
5240   }
5241   {
5242     TRACE_GC(heap()->tracer(),
5243              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
5244     V8::GetCurrentPlatform()
5245         ->PostJob(
5246             v8::TaskPriority::kUserBlocking,
5247             std::make_unique<PointersUpdatingJob>(
5248                 isolate(), std::move(updating_items),
5249                 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
5250                 GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
5251         ->Join();
5252   }
5253 
5254   {
5255     TRACE_GC(heap()->tracer(),
5256              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
5257 
5258     EvacuationWeakObjectRetainer evacuation_object_retainer;
5259     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
5260 
5261     // Update pointers from external string table.
5262     heap()->UpdateYoungReferencesInExternalStringTable(
5263         &UpdateReferenceInExternalStringTableEntry);
5264   }
5265 }
5266 
5267 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
5268  public:
5269   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
5270       : collector_(collector) {}
5271 
5272   void VisitRootPointer(Root root, const char* description,
5273                         FullObjectSlot p) final {
5274     MarkObjectByPointer(p);
5275   }
5276 
5277   void VisitRootPointers(Root root, const char* description,
5278                          FullObjectSlot start, FullObjectSlot end) final {
5279     for (FullObjectSlot p = start; p < end; ++p) {
5280       DCHECK(!MapWord::IsPacked((*p).ptr()));
5281       MarkObjectByPointer(p);
5282     }
5283   }
5284 
5285  private:
5286   V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
5287     if (!(*p).IsHeapObject()) return;
5288     collector_->MarkRootObject(HeapObject::cast(*p));
5289   }
5290   MinorMarkCompactCollector* const collector_;
5291 };
5292 
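// Runs one complete minor (young-generation) mark-compact cycle: finish
// sweeping left over from the full collector, mark live young objects,
// clear non-live references, evacuate survivors, update the incremental
// marking worklist, reset liveness of from-space pages, and sweep
// ArrayBuffer extensions.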
5293 void MinorMarkCompactCollector::CollectGarbage() {
5294   // Minor MC does not support processing the ephemeron remembered set.
5295   DCHECK(heap()->ephemeron_remembered_set_.empty());
5296 
5297   {
5298     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
5299     heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
5300   }
5301 
5302   heap()->array_buffer_sweeper()->EnsureFinished();
5303 
5304   MarkLiveObjects();
5305   ClearNonLiveReferences();
5306 #ifdef VERIFY_HEAP
5307   if (FLAG_verify_heap) {
5308     YoungGenerationMarkingVerifier verifier(heap());
5309     verifier.Run();
5310   }
5311 #endif  // VERIFY_HEAP
5312 
5313   Evacuate();
5314 #ifdef VERIFY_HEAP
5315   if (FLAG_verify_heap) {
5316     YoungGenerationEvacuationVerifier verifier(heap());
5317     verifier.Run();
5318   }
5319 #endif  // VERIFY_HEAP
5320 
5321   {
5322     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
5323     heap()->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
5324   }
5325 
5326   {
5327     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
5328     for (Page* p :
5329          PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
5330       DCHECK_EQ(promoted_pages_.end(),
5331                 std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
5332       non_atomic_marking_state()->ClearLiveness(p);
5333       if (FLAG_concurrent_marking) {
5334         // Ensure that concurrent marker does not track pages that are
5335         // going to be unmapped.
5336         heap()->concurrent_marking()->ClearMemoryChunkData(p);
5337       }
5338     }
5339     // Since we promote all surviving large objects immediately, all remaining
5340     // large objects must be dead.
5341     // TODO(v8:11685): Don't free all as soon as we have an intermediate
5342     // generation.
5343     heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
5344   }
5345 
5346   CleanupPromotedPages();
5347 
5348   SweepArrayBufferExtensions();
5349 }
5350 
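// Turns the gaps between grey (live) objects on the page into filler
// objects so the page can be iterated linearly. The full collector's mark
// bits are cleared for the freed ranges, and the memory is optionally
// zapped.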
5351 void MinorMarkCompactCollector::MakeIterable(
5352     Page* p, FreeSpaceTreatmentMode free_space_mode) {
5353   CHECK(!p->IsLargePage());
5354   // We have to clear the full collector's markbits for the areas that we
5355   // remove here.
5356   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
5357   Address free_start = p->area_start();
5358 
5359   for (auto object_and_size :
5360        LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
5361     HeapObject const object = object_and_size.first;
5362     DCHECK(non_atomic_marking_state()->IsGrey(object));
5363     Address free_end = object.address();
5364     if (free_end != free_start) {
5365       CHECK_GT(free_end, free_start);
5366       size_t size = static_cast<size_t>(free_end - free_start);
5367       full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
5368           p->AddressToMarkbitIndex(free_start),
5369           p->AddressToMarkbitIndex(free_end));
5370       if (free_space_mode == ZAP_FREE_SPACE) {
5371         ZapCode(free_start, size);
5372       }
5373       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
5374                                       ClearRecordedSlots::kNo);
5375     }
5376     PtrComprCageBase cage_base(p->heap()->isolate());
5377     Map map = object.map(cage_base, kAcquireLoad);
5378     int size = object.SizeFromMap(map);
5379     free_start = free_end + size;
5380   }
5381 
5382   if (free_start != p->area_end()) {
5383     CHECK_GT(p->area_end(), free_start);
5384     size_t size = static_cast<size_t>(p->area_end() - free_start);
5385     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
5386         p->AddressToMarkbitIndex(free_start),
5387         p->AddressToMarkbitIndex(p->area_end()));
5388     if (free_space_mode == ZAP_FREE_SPACE) {
5389       ZapCode(free_start, size);
5390     }
5391     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
5392                                     ClearRecordedSlots::kNo);
5393   }
5394 }
5395 
5396 namespace {
5397 
5398 // Helper class for pruning the string table.
5399 class YoungGenerationExternalStringTableCleaner : public RootVisitor {
5400  public:
5401   YoungGenerationExternalStringTableCleaner(
5402       MinorMarkCompactCollector* collector)
5403       : heap_(collector->heap()),
5404         marking_state_(collector->non_atomic_marking_state()) {}
5405 
5406   void VisitRootPointers(Root root, const char* description,
5407                          FullObjectSlot start, FullObjectSlot end) override {
5408     DCHECK_EQ(static_cast<int>(root),
5409               static_cast<int>(Root::kExternalStringsTable));
5410     // Visit all HeapObject pointers in [start, end).
5411     for (FullObjectSlot p = start; p < end; ++p) {
5412       Object o = *p;
5413       if (o.IsHeapObject()) {
5414         HeapObject heap_object = HeapObject::cast(o);
5415         if (marking_state_->IsWhite(heap_object)) {
5416           if (o.IsExternalString()) {
5417             heap_->FinalizeExternalString(String::cast(*p));
5418           } else {
5419             // The original external string may have been internalized.
5420             DCHECK(o.IsThinString());
5421           }
5422           // Set the entry to the_hole_value (as deleted).
5423           p.store(ReadOnlyRoots(heap_).the_hole_value());
5424         }
5425       }
5426     }
5427   }
5428 
5429  private:
5430   Heap* heap_;
5431   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
5432 };
5433 
5434 // Marked young generation objects and all old generation objects will be
5435 // retained.
5436 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
5437  public:
5438   explicit MinorMarkCompactWeakObjectRetainer(
5439       MinorMarkCompactCollector* collector)
5440       : marking_state_(collector->non_atomic_marking_state()) {}
5441 
5442   Object RetainAs(Object object) override {
5443     HeapObject heap_object = HeapObject::cast(object);
5444     if (!Heap::InYoungGeneration(heap_object)) return object;
5445 
5446     // Young generation marking only marks to grey instead of black.
5447     DCHECK(!marking_state_->IsBlack(heap_object));
5448     if (marking_state_->IsGrey(heap_object)) {
5449       return object;
5450     }
5451     return Object();
5452   }
5453 
5454  private:
5455   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
5456 };
5457 
5458 }  // namespace
5459 
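// Drops references to dead young-generation objects: cleans up the young
// external string table and processes the young weak object lists.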
5460 void MinorMarkCompactCollector::ClearNonLiveReferences() {
5461   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
5462 
5463   {
5464     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
5465     // Internalized strings are always stored in old space, so there is no need
5466     // to clean them here.
5467     YoungGenerationExternalStringTableCleaner external_visitor(this);
5468     heap()->external_string_table_.IterateYoung(&external_visitor);
5469     heap()->external_string_table_.CleanUpYoung();
5470   }
5471 
5472   {
5473     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
5474     // Process the weak references.
5475     MinorMarkCompactWeakObjectRetainer retainer(this);
5476     heap()->ProcessYoungWeakReferences(&retainer);
5477   }
5478 }
5479 
5480 void MinorMarkCompactCollector::EvacuatePrologue() {
5481   NewSpace* new_space = heap()->new_space();
5482   // Append the list of new space pages to be processed.
5483   for (Page* p :
5484        PageRange(new_space->first_allocatable_address(), new_space->top())) {
5485     new_space_evacuation_pages_.push_back(p);
5486   }
5487 
5488   new_space->Flip();
5489   new_space->ResetLinearAllocationArea();
5490 
5491   heap()->new_lo_space()->Flip();
5492   heap()->new_lo_space()->ResetPendingObject();
5493 }
5494 
5495 void MinorMarkCompactCollector::EvacuateEpilogue() {
5496   heap()->new_space()->set_age_mark(heap()->new_space()->top());
5497   // Give pages that are queued to be freed back to the OS.
5498   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
5499 }
5500 
5501 int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
5502     std::vector<std::unique_ptr<UpdatingItem>>* items) {
5503   // Seed to-space pages.
5504   const Address space_start = heap()->new_space()->first_allocatable_address();
5505   const Address space_end = heap()->new_space()->top();
5506   int pages = 0;
5507   for (Page* page : PageRange(space_start, space_end)) {
5508     Address start =
5509         page->Contains(space_start) ? space_start : page->area_start();
5510     Address end = page->Contains(space_end) ? space_end : page->area_end();
5511     items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
5512     pages++;
5513   }
5514   return pages;
5515 }
5516 
5517 std::unique_ptr<UpdatingItem>
5518 MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
5519                                                      Address start,
5520                                                      Address end) {
5521   return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
5522       heap(), chunk, start, end, non_atomic_marking_state());
5523 }
5524 
5525 std::unique_ptr<UpdatingItem>
5526 MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
5527     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
5528   return std::make_unique<RememberedSetUpdatingItem<
5529       NonAtomicMarkingState, GarbageCollector::MINOR_MARK_COMPACTOR>>(
5530       heap(), non_atomic_marking_state(), chunk, updating_mode);
5531 }
5532 
5533 class PageMarkingItem;
5534 class RootMarkingItem;
5535 class YoungGenerationMarkingTask;
5536 
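// Per-worker marking context: owns a local view of the global marking
// worklist, a YoungGenerationMarkingVisitor, and a per-page live-bytes
// accumulator that is flushed into the marking state when the worker is
// done.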
5537 class YoungGenerationMarkingTask {
5538  public:
5539   YoungGenerationMarkingTask(
5540       Isolate* isolate, MinorMarkCompactCollector* collector,
5541       MinorMarkCompactCollector::MarkingWorklist* global_worklist)
5542       : marking_worklist_local_(global_worklist),
5543         marking_state_(collector->marking_state()),
5544         visitor_(isolate, marking_state_, &marking_worklist_local_) {
5545     local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
5546                               Page::kPageSize);
5547   }
5548 
5549   void MarkObject(Object object) {
5550     if (!Heap::InYoungGeneration(object)) return;
5551     HeapObject heap_object = HeapObject::cast(object);
5552     if (marking_state_->WhiteToGrey(heap_object)) {
5553       const int size = visitor_.Visit(heap_object);
5554       IncrementLiveBytes(heap_object, size);
5555     }
5556   }
5557 
5558   void EmptyMarkingWorklist() {
5559     HeapObject object;
5560     while (marking_worklist_local_.Pop(&object)) {
5561       const int size = visitor_.Visit(object);
5562       IncrementLiveBytes(object, size);
5563     }
5564   }
5565 
5566   void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
5567     local_live_bytes_[Page::FromHeapObject(object)] += bytes;
5568   }
5569 
5570   void FlushLiveBytes() {
5571     for (auto pair : local_live_bytes_) {
5572       marking_state_->IncrementLiveBytes(pair.first, pair.second);
5573     }
5574   }
5575 
5576  private:
5577   MinorMarkCompactCollector::MarkingWorklist::Local marking_worklist_local_;
5578   MinorMarkCompactCollector::MarkingState* marking_state_;
5579   YoungGenerationMarkingVisitor visitor_;
5580   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
5581 };
5582 
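// A unit of parallel marking work covering one memory chunk: iterates the
// chunk's OLD_TO_NEW remembered set (untyped and typed slots) and marks the
// young-generation objects those slots point to, removing slots that no
// longer point into the young generation.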
5583 class PageMarkingItem : public ParallelWorkItem {
5584  public:
5585   explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
5586   ~PageMarkingItem() = default;
5587 
5588   void Process(YoungGenerationMarkingTask* task) {
5589     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5590                  "PageMarkingItem::Process");
5591     base::MutexGuard guard(chunk_->mutex());
5592     MarkUntypedPointers(task);
5593     MarkTypedPointers(task);
5594   }
5595 
5596  private:
5597   inline Heap* heap() { return chunk_->heap(); }
5598 
5599   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
5600     InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
5601     RememberedSet<OLD_TO_NEW>::Iterate(
5602         chunk_,
5603         [this, task, &filter](MaybeObjectSlot slot) {
5604           if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
5605           return CheckAndMarkObject(task, slot);
5606         },
5607         SlotSet::FREE_EMPTY_BUCKETS);
5608   }
5609 
5610   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
5611     RememberedSet<OLD_TO_NEW>::IterateTyped(
5612         chunk_, [=](SlotType slot_type, Address slot) {
5613           return UpdateTypedSlotHelper::UpdateTypedSlot(
5614               heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
5615                 return CheckAndMarkObject(task, slot);
5616               });
5617         });
5618   }
5619 
5620   template <typename TSlot>
5621   V8_INLINE SlotCallbackResult
5622   CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
5623     static_assert(
5624         std::is_same<TSlot, FullMaybeObjectSlot>::value ||
5625             std::is_same<TSlot, MaybeObjectSlot>::value,
5626         "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
5627     MaybeObject object = *slot;
5628     if (Heap::InYoungGeneration(object)) {
5629       // Marking happens before flipping the young generation, so the object
5630       // has to be in a to page.
5631       DCHECK(Heap::InToPage(object));
5632       HeapObject heap_object;
5633       bool success = object.GetHeapObject(&heap_object);
5634       USE(success);
5635       DCHECK(success);
5636       task->MarkObject(heap_object);
5637       return KEEP_SLOT;
5638     }
5639     return REMOVE_SLOT;
5640   }
5641 
5642   MemoryChunk* chunk_;
5643 };
5644 
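// JobTask that distributes PageMarkingItems across worker threads and
// drains the shared marking worklist. The desired concurrency is estimated
// from the number of remaining items and the global worklist size.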
5645 class YoungGenerationMarkingJob : public v8::JobTask {
5646  public:
5647   YoungGenerationMarkingJob(
5648       Isolate* isolate, MinorMarkCompactCollector* collector,
5649       MinorMarkCompactCollector::MarkingWorklist* global_worklist,
5650       std::vector<PageMarkingItem> marking_items)
5651       : isolate_(isolate),
5652         collector_(collector),
5653         global_worklist_(global_worklist),
5654         marking_items_(std::move(marking_items)),
5655         remaining_marking_items_(marking_items_.size()),
5656         generator_(marking_items_.size()) {}
5657 
5658   void Run(JobDelegate* delegate) override {
5659     if (delegate->IsJoiningThread()) {
5660       TRACE_GC(collector_->heap()->tracer(),
5661                GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
5662       ProcessItems(delegate);
5663     } else {
5664       TRACE_GC_EPOCH(collector_->heap()->tracer(),
5665                      GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
5666                      ThreadKind::kBackground);
5667       ProcessItems(delegate);
5668     }
5669   }
5670 
5671   size_t GetMaxConcurrency(size_t worker_count) const override {
5672     // Pages are not private to markers but we can still use them to estimate
5673     // the amount of marking that is required.
5674     const int kPagesPerTask = 2;
5675     size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
5676     size_t num_tasks =
5677         std::max((items + 1) / kPagesPerTask, global_worklist_->Size());
5678     if (!FLAG_parallel_marking) {
5679       num_tasks = std::min<size_t>(1, num_tasks);
5680     }
5681     return std::min<size_t>(num_tasks,
5682                             MinorMarkCompactCollector::kMaxParallelTasks);
5683   }
5684 
5685  private:
5686   void ProcessItems(JobDelegate* delegate) {
5687     double marking_time = 0.0;
5688     {
5689       TimedScope scope(&marking_time);
5690       YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_);
5691       ProcessMarkingItems(&task);
5692       task.EmptyMarkingWorklist();
5693       task.FlushLiveBytes();
5694     }
5695     if (FLAG_trace_minor_mc_parallel_marking) {
5696       PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
5697                    static_cast<void*>(this), marking_time);
5698     }
5699   }
5700 
5701   void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
5702     while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
5703       base::Optional<size_t> index = generator_.GetNext();
5704       if (!index) return;
5705       for (size_t i = *index; i < marking_items_.size(); ++i) {
5706         auto& work_item = marking_items_[i];
5707         if (!work_item.TryAcquire()) break;
5708         work_item.Process(task);
5709         task->EmptyMarkingWorklist();
5710         if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
5711             1) {
5712           return;
5713         }
5714       }
5715     }
5716   }
5717 
5718   Isolate* isolate_;
5719   MinorMarkCompactCollector* collector_;
5720   MinorMarkCompactCollector::MarkingWorklist* global_worklist_;
5721   std::vector<PageMarkingItem> marking_items_;
5722   std::atomic_size_t remaining_marking_items_{0};
5723   IndexGenerator generator_;
5724 };
5725 
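// Seeds marking with the root set (strong roots, young global handles and
// the OLD_TO_NEW remembered sets) and processes it with a parallel
// YoungGenerationMarkingJob. On return, both the global worklist and the
// main thread's local worklist are empty.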
5726 void MinorMarkCompactCollector::MarkRootSetInParallel(
5727     RootMarkingVisitor* root_visitor) {
5728   {
5729     std::vector<PageMarkingItem> marking_items;
5730 
5731     // Seed the root set (roots + old->new set).
5732     {
5733       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
5734       isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
5735           &JSObject::IsUnmodifiedApiObject);
5736       // MinorMC treats all weak roots except for global handles as strong.
5737       // That is why we don't set skip_weak = true here and instead visit
5738       // global handles separately.
5739       heap()->IterateRoots(
5740           root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
5741                                                 SkipRoot::kGlobalHandles,
5742                                                 SkipRoot::kOldGeneration});
5743       isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
5744           root_visitor);
5745       // Create items for each page.
5746       RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
5747           heap(), [&marking_items](MemoryChunk* chunk) {
5748             marking_items.emplace_back(chunk);
5749           });
5750     }
5751 
5752     // Add tasks and run in parallel.
5753     {
5754       // The main thread might hold local items, while GlobalPoolSize() == 0.
5755       // Flush to ensure these items are visible globally and picked up by the
5756       // job.
5757       main_thread_worklist_local_.Publish();
5758       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
5759       V8::GetCurrentPlatform()
5760           ->PostJob(v8::TaskPriority::kUserBlocking,
5761                     std::make_unique<YoungGenerationMarkingJob>(
5762                         isolate(), this, worklist(), std::move(marking_items)))
5763           ->Join();
5764 
5765       DCHECK(worklist()->IsEmpty());
5766       DCHECK(main_thread_worklist_local_.IsLocalEmpty());
5767     }
5768   }
5769 }
5770 
5771 void MinorMarkCompactCollector::MarkLiveObjects() {
5772   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
5773 
5774   PostponeInterruptsScope postpone(isolate());
5775 
5776   RootMarkingVisitor root_visitor(this);
5777 
5778   MarkRootSetInParallel(&root_visitor);
5779 
5780   // Mark rest on the main thread.
5781   {
5782     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
5783     DrainMarkingWorklist();
5784   }
5785 
5786   {
5787     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
5788     isolate()->global_handles()->MarkYoungWeakDeadObjectsPending(
5789         &IsUnmarkedObjectForYoungGeneration);
5790     isolate()->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
5791         &root_visitor);
5792     isolate()->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
5793         &root_visitor, &IsUnmarkedObjectForYoungGeneration);
5794     DrainMarkingWorklist();
5795   }
5796 
5797   if (FLAG_minor_mc_trace_fragmentation) {
5798     TraceFragmentation();
5799   }
5800 }
5801 
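// Drains the main thread's local marking worklist, visiting each popped
// (grey) object with the main marking visitor until the worklist is empty.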
5802 void MinorMarkCompactCollector::DrainMarkingWorklist() {
5803   PtrComprCageBase cage_base(isolate());
5804   HeapObject object;
5805   while (main_thread_worklist_local_.Pop(&object)) {
5806     DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
5807     DCHECK(object.IsHeapObject());
5808     DCHECK(heap()->Contains(object));
5809     DCHECK(non_atomic_marking_state()->IsGrey(object));
5810     main_marking_visitor()->Visit(object);
5811   }
5812   DCHECK(main_thread_worklist_local_.IsLocalEmpty());
5813 }
5814 
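// Prints a fragmentation summary for new space: live bytes plus the amount
// of free space falling into blocks of at least 0/1K/2K/4K bytes. Called
// only when --minor-mc-trace-fragmentation is enabled.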
5815 void MinorMarkCompactCollector::TraceFragmentation() {
5816   NewSpace* new_space = heap()->new_space();
5817   PtrComprCageBase cage_base(isolate());
5818   const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
5819   size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
5820   size_t live_bytes = 0;
5821   size_t allocatable_bytes = 0;
5822   for (Page* p :
5823        PageRange(new_space->first_allocatable_address(), new_space->top())) {
5824     Address free_start = p->area_start();
5825     for (auto object_and_size : LiveObjectRange<kGreyObjects>(
5826              p, non_atomic_marking_state()->bitmap(p))) {
5827       HeapObject const object = object_and_size.first;
5828       Address free_end = object.address();
5829       if (free_end != free_start) {
5830         size_t free_bytes = free_end - free_start;
5831         int free_bytes_index = 0;
5832         for (auto free_size_class_limit : free_size_class_limits) {
5833           if (free_bytes >= free_size_class_limit) {
5834             free_bytes_of_class[free_bytes_index] += free_bytes;
5835           }
5836           free_bytes_index++;
5837         }
5838       }
5839       Map map = object.map(cage_base, kAcquireLoad);
5840       int size = object.SizeFromMap(map);
5841       live_bytes += size;
5842       free_start = free_end + size;
5843     }
5844     size_t area_end =
5845         p->Contains(new_space->top()) ? new_space->top() : p->area_end();
5846     if (free_start != area_end) {
5847       size_t free_bytes = area_end - free_start;
5848       int free_bytes_index = 0;
5849       for (auto free_size_class_limit : free_size_class_limits) {
5850         if (free_bytes >= free_size_class_limit) {
5851           free_bytes_of_class[free_bytes_index] += free_bytes;
5852         }
5853         free_bytes_index++;
5854       }
5855     }
5856     allocatable_bytes += area_end - p->area_start();
5857     CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
5858   }
5859   PrintIsolate(
5860       isolate(),
5861       "Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
5862       "free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
5863       allocatable_bytes, live_bytes, free_bytes_of_class[0],
5864       free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
5865 }
5866 
5867 void MinorMarkCompactCollector::Evacuate() {
5868   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
5869   base::MutexGuard guard(heap()->relocation_mutex());
5870 
5871   {
5872     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
5873     EvacuatePrologue();
5874   }
5875 
5876   {
5877     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
5878     EvacuatePagesInParallel();
5879   }
5880 
5881   if (!FLAG_minor_mc_sweeping) UpdatePointersAfterEvacuation();
5882 
5883   {
5884     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
5885     if (!heap()->new_space()->Rebalance()) {
5886       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
5887     }
5888   }
5889 
5890   {
5891     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
5892     for (Page* p : new_space_evacuation_pages_) {
5893       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
5894           p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
5895         promoted_pages_.push_back(p);
5896       }
5897     }
5898     new_space_evacuation_pages_.clear();
5899   }
5900 
5901   {
5902     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
5903     EvacuateEpilogue();
5904   }
5905 }
5906 
5907 namespace {
5908 
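// Evacuator specialization for the minor collector: pages are processed by
// visiting grey objects (young-generation marking never marks black) and
// migrated slots are recorded via YoungGenerationRecordMigratedSlotVisitor.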
5909 class YoungGenerationEvacuator : public Evacuator {
5910  public:
5911   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
5912       : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
5913                   AlwaysPromoteYoung::kNo),
5914         record_visitor_(collector->heap()->mark_compact_collector()),
5915         local_allocator_(
5916             heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
5917         collector_(collector) {}
5918 
5919   GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
5920     return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
5921   }
5922 
5923   GCTracer::Scope::ScopeId GetTracingScope() override {
5924     return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
5925   }
5926 
5927  protected:
5928   void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
5929 
5930   YoungGenerationRecordMigratedSlotVisitor record_visitor_;
5931   EvacuationAllocator local_allocator_;
5932   MinorMarkCompactCollector* collector_;
5933 };
5934 
5935 void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
5936                                                intptr_t* live_bytes) {
5937   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5938                "YoungGenerationEvacuator::RawEvacuatePage");
5939   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
5940       collector_->non_atomic_marking_state();
5941   *live_bytes = marking_state->live_bytes(chunk);
5942   switch (ComputeEvacuationMode(chunk)) {
5943     case kObjectsNewToOld:
5944       DCHECK(!FLAG_minor_mc_sweeping);
5945       LiveObjectVisitor::VisitGreyObjectsNoFail(
5946           chunk, marking_state, &new_space_visitor_,
5947           LiveObjectVisitor::kClearMarkbits);
5948       break;
5949     case kPageNewToOld:
5950       LiveObjectVisitor::VisitGreyObjectsNoFail(
5951           chunk, marking_state, &new_to_old_page_visitor_,
5952           LiveObjectVisitor::kKeepMarking);
5953       new_to_old_page_visitor_.account_moved_bytes(
5954           marking_state->live_bytes(chunk));
5955       if (!chunk->IsLargePage()) {
5956         if (heap()->ShouldZapGarbage()) {
5957           collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
5958         } else if (heap()->incremental_marking()->IsMarking()) {
5959           // When incremental marking is on, we need to clear the mark bits of
5960           // the full collector. We cannot yet discard the young generation mark
5961           // bits as they are still relevant for updating pointers.
5962           collector_->MakeIterable(static_cast<Page*>(chunk),
5963                                    IGNORE_FREE_SPACE);
5964         }
5965       }
5966       break;
5967     case kPageNewToNew:
5968       LiveObjectVisitor::VisitGreyObjectsNoFail(
5969           chunk, marking_state, &new_to_new_page_visitor_,
5970           LiveObjectVisitor::kKeepMarking);
5971       new_to_new_page_visitor_.account_moved_bytes(
5972           marking_state->live_bytes(chunk));
5973       DCHECK(!chunk->IsLargePage());
5974       if (heap()->ShouldZapGarbage()) {
5975         collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
5976       } else if (heap()->incremental_marking()->IsMarking()) {
5977         // When incremental marking is on, we need to clear the mark bits of
5978         // the full collector. We cannot yet discard the young generation mark
5979         // bits as they are still relevant for updating pointers.
5980         collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
5981       }
5982       break;
5983     case kObjectsOldToOld:
5984       UNREACHABLE();
5985   }
5986 }
5987 
5988 }  // namespace
5989 
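// Collects evacuation work items: new-space pages with live objects (pages
// moved wholesale are flagged for new-to-old or new-to-new promotion) and
// surviving new-space large objects, which are promoted to the old
// large-object space immediately. The items are then processed in parallel
// by YoungGenerationEvacuator tasks.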
5990 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
5991   std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
5992   intptr_t live_bytes = 0;
5993 
5994   for (Page* page : new_space_evacuation_pages_) {
5995     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
5996     if (live_bytes_on_page == 0) continue;
5997     live_bytes += live_bytes_on_page;
5998     if (FLAG_minor_mc_sweeping ||
5999         ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
6000       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
6001         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
6002       } else {
6003         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
6004       }
6005     }
6006     evacuation_items.emplace_back(ParallelWorkItem{}, page);
6007   }
6008 
6009   // Promote young generation large objects.
6010   for (auto it = heap()->new_lo_space()->begin();
6011        it != heap()->new_lo_space()->end();) {
6012     LargePage* current = *it;
6013     it++;
6014     HeapObject object = current->GetObject();
6015     DCHECK(!non_atomic_marking_state_.IsBlack(object));
6016     if (non_atomic_marking_state_.IsGrey(object)) {
6017       heap_->lo_space()->PromoteNewLargeObject(current);
6018       current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
6019       promoted_large_pages_.push_back(current);
6020       evacuation_items.emplace_back(ParallelWorkItem{}, current);
6021     }
6022   }
6023   if (evacuation_items.empty()) return;
6024 
6025   YoungGenerationMigrationObserver observer(heap(),
6026                                             heap()->mark_compact_collector());
6027   const auto pages_count = evacuation_items.size();
6028   const auto wanted_num_tasks =
6029       CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
6030           this, std::move(evacuation_items), &observer);
6031 
6032   if (FLAG_trace_evacuation) {
6033     TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
6034   }
6035 }
6036 
6037 }  // namespace internal
6038 }  // namespace v8
6039