1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include <unordered_map>
8 
9 #include "src/base/utils/random-number-generator.h"
10 #include "src/cancelable-task.h"
11 #include "src/code-stubs.h"
12 #include "src/compilation-cache.h"
13 #include "src/deoptimizer.h"
14 #include "src/execution.h"
15 #include "src/frames-inl.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-collector.h"
18 #include "src/heap/array-buffer-tracker-inl.h"
19 #include "src/heap/gc-tracer.h"
20 #include "src/heap/incremental-marking.h"
21 #include "src/heap/invalidated-slots-inl.h"
22 #include "src/heap/item-parallel-job.h"
23 #include "src/heap/local-allocator-inl.h"
24 #include "src/heap/mark-compact-inl.h"
25 #include "src/heap/object-stats.h"
26 #include "src/heap/objects-visiting-inl.h"
27 #include "src/heap/spaces-inl.h"
28 #include "src/heap/sweeper.h"
29 #include "src/heap/worklist.h"
30 #include "src/ic/stub-cache.h"
31 #include "src/objects/hash-table-inl.h"
32 #include "src/transitions-inl.h"
33 #include "src/utils-inl.h"
34 #include "src/v8.h"
35 #include "src/vm-state-inl.h"
36 
37 namespace v8 {
38 namespace internal {
39 
40 const char* Marking::kWhiteBitPattern = "00";
41 const char* Marking::kBlackBitPattern = "11";
42 const char* Marking::kGreyBitPattern = "10";
43 const char* Marking::kImpossibleBitPattern = "01";
44 
45 // The following has to hold in order for {MarkingState::MarkBitFrom} to not
46 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
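// Each object's color occupies the two consecutive mark bits at its start
// (one bit per word), so a minimum object size of two words keeps the bit
// pairs of neighboring objects disjoint.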
47 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
48 
49 // =============================================================================
50 // Verifiers
51 // =============================================================================
52 
53 #ifdef VERIFY_HEAP
54 namespace {
55 
56 class MarkingVerifier : public ObjectVisitor, public RootVisitor {
57  public:
58   virtual void Run() = 0;
59 
60  protected:
61   explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
62 
63   virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
64 
65   virtual void VerifyPointers(Object** start, Object** end) = 0;
66   virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
67 
68   virtual bool IsMarked(HeapObject* object) = 0;
69 
70   virtual bool IsBlackOrGrey(HeapObject* object) = 0;
71 
72   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
73     VerifyPointers(start, end);
74   }
75 
76   void VisitPointers(HeapObject* host, MaybeObject** start,
77                      MaybeObject** end) override {
78     VerifyPointers(start, end);
79   }
80 
81   void VisitRootPointers(Root root, const char* description, Object** start,
82                          Object** end) override {
83     VerifyPointers(start, end);
84   }
85 
86   void VerifyRoots(VisitMode mode);
87   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
88   void VerifyMarking(NewSpace* new_space);
89   void VerifyMarking(PagedSpace* paged_space);
90 
91   Heap* heap_;
92 };
93 
94 void MarkingVerifier::VerifyRoots(VisitMode mode) {
95   heap_->IterateStrongRoots(this, mode);
96 }
97 
98 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
99                                           Address end) {
100   HeapObject* object;
101   Address next_object_must_be_here_or_later = start;
102   for (Address current = start; current < end;) {
103     object = HeapObject::FromAddress(current);
104     // One-word fillers at the end of a black area can be grey.
105     if (IsBlackOrGrey(object) &&
106         object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
107       CHECK(IsMarked(object));
108       CHECK(current >= next_object_must_be_here_or_later);
109       object->Iterate(this);
110       next_object_must_be_here_or_later = current + object->Size();
111       // The object is either part of a black area created by black
112       // allocation, or a regular black object.
113       CHECK(
114           bitmap(page)->AllBitsSetInRange(
115               page->AddressToMarkbitIndex(current),
116               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
117           bitmap(page)->AllBitsClearInRange(
118               page->AddressToMarkbitIndex(current + kPointerSize * 2),
119               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
120       current = next_object_must_be_here_or_later;
121     } else {
122       current += kPointerSize;
123     }
124   }
125 }
126 
127 void MarkingVerifier::VerifyMarking(NewSpace* space) {
128   Address end = space->top();
129   // The bottom position is at the start of its page. This allows us to use
130   // page->area_start() as the start of the range on all pages.
131   CHECK_EQ(space->first_allocatable_address(),
132            space->first_page()->area_start());
133 
134   PageRange range(space->first_allocatable_address(), end);
135   for (auto it = range.begin(); it != range.end();) {
136     Page* page = *(it++);
137     Address limit = it != range.end() ? page->area_end() : end;
138     CHECK(limit == end || !page->Contains(end));
139     VerifyMarkingOnPage(page, page->area_start(), limit);
140   }
141 }
142 
143 void MarkingVerifier::VerifyMarking(PagedSpace* space) {
144   for (Page* p : *space) {
145     VerifyMarkingOnPage(p, p->area_start(), p->area_end());
146   }
147 }
148 
149 class FullMarkingVerifier : public MarkingVerifier {
150  public:
151   explicit FullMarkingVerifier(Heap* heap)
152       : MarkingVerifier(heap),
153         marking_state_(
154             heap->mark_compact_collector()->non_atomic_marking_state()) {}
155 
156   void Run() override {
157     VerifyRoots(VISIT_ONLY_STRONG);
158     VerifyMarking(heap_->new_space());
159     VerifyMarking(heap_->old_space());
160     VerifyMarking(heap_->code_space());
161     VerifyMarking(heap_->map_space());
162 
163     LargeObjectIterator it(heap_->lo_space());
164     for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
165       if (marking_state_->IsBlackOrGrey(obj)) {
166         obj->Iterate(this);
167       }
168     }
169   }
170 
171  protected:
172   Bitmap* bitmap(const MemoryChunk* chunk) override {
173     return marking_state_->bitmap(chunk);
174   }
175 
176   bool IsMarked(HeapObject* object) override {
177     return marking_state_->IsBlack(object);
178   }
179 
180   bool IsBlackOrGrey(HeapObject* object) override {
181     return marking_state_->IsBlackOrGrey(object);
182   }
183 
184   void VerifyPointers(Object** start, Object** end) override {
185     for (Object** current = start; current < end; current++) {
186       if ((*current)->IsHeapObject()) {
187         HeapObject* object = HeapObject::cast(*current);
188         CHECK(marking_state_->IsBlackOrGrey(object));
189       }
190     }
191   }
192 
193   void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
194     for (MaybeObject** current = start; current < end; current++) {
195       HeapObject* object;
196       if ((*current)->ToStrongHeapObject(&object)) {
197         CHECK(marking_state_->IsBlackOrGrey(object));
198       }
199     }
200   }
201 
202   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
203     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
204     if (!host->IsWeakObject(rinfo->target_object())) {
205       Object* p = rinfo->target_object();
206       VisitPointer(host, &p);
207     }
208   }
209 
210  private:
211   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
212 };
213 
214 class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
215  public:
216   virtual void Run() = 0;
217 
218   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
219     VerifyPointers(start, end);
220   }
221 
222   void VisitPointers(HeapObject* host, MaybeObject** start,
223                      MaybeObject** end) override {
224     VerifyPointers(start, end);
225   }
226 
227   void VisitRootPointers(Root root, const char* description, Object** start,
228                          Object** end) override {
229     VerifyPointers(start, end);
230   }
231 
232  protected:
233   explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
234 
235   inline Heap* heap() { return heap_; }
236 
237   virtual void VerifyPointers(Object** start, Object** end) = 0;
238   virtual void VerifyPointers(MaybeObject** start, MaybeObject** end) = 0;
239 
240   void VerifyRoots(VisitMode mode);
241   void VerifyEvacuationOnPage(Address start, Address end);
242   void VerifyEvacuation(NewSpace* new_space);
243   void VerifyEvacuation(PagedSpace* paged_space);
244 
245   Heap* heap_;
246 };
247 
248 void EvacuationVerifier::VerifyRoots(VisitMode mode) {
249   heap_->IterateStrongRoots(this, mode);
250 }
251 
252 void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
253   Address current = start;
254   while (current < end) {
255     HeapObject* object = HeapObject::FromAddress(current);
256     if (!object->IsFiller()) object->Iterate(this);
257     current += object->Size();
258   }
259 }
260 
261 void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
262   PageRange range(space->first_allocatable_address(), space->top());
263   for (auto it = range.begin(); it != range.end();) {
264     Page* page = *(it++);
265     Address current = page->area_start();
266     Address limit = it != range.end() ? page->area_end() : space->top();
267     CHECK(limit == space->top() || !page->Contains(space->top()));
268     VerifyEvacuationOnPage(current, limit);
269   }
270 }
271 
272 void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
273   for (Page* p : *space) {
274     if (p->IsEvacuationCandidate()) continue;
275     if (p->Contains(space->top())) {
276       CodePageMemoryModificationScope memory_modification_scope(p);
277       heap_->CreateFillerObjectAt(
278           space->top(), static_cast<int>(space->limit() - space->top()),
279           ClearRecordedSlots::kNo);
280     }
281     VerifyEvacuationOnPage(p->area_start(), p->area_end());
282   }
283 }
284 
285 class FullEvacuationVerifier : public EvacuationVerifier {
286  public:
287   explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
288 
289   void Run() override {
290     VerifyRoots(VISIT_ALL);
291     VerifyEvacuation(heap_->new_space());
292     VerifyEvacuation(heap_->old_space());
293     VerifyEvacuation(heap_->code_space());
294     VerifyEvacuation(heap_->map_space());
295   }
296 
297  protected:
298   void VerifyPointers(Object** start, Object** end) override {
299     for (Object** current = start; current < end; current++) {
300       if ((*current)->IsHeapObject()) {
301         HeapObject* object = HeapObject::cast(*current);
302         if (Heap::InNewSpace(object)) {
303           CHECK(Heap::InToSpace(object));
304         }
305         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
306       }
307     }
308   }
309   void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
310     for (MaybeObject** current = start; current < end; current++) {
311       HeapObject* object;
312       if ((*current)->ToStrongHeapObject(&object)) {
313         if (Heap::InNewSpace(object)) {
314           CHECK(Heap::InToSpace(object));
315         }
316         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
317       }
318     }
319   }
320 };
321 
322 }  // namespace
323 #endif  // VERIFY_HEAP
324 
325 // =============================================================================
326 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
327 // =============================================================================
328 
329 using MarkCompactMarkingVisitor =
330     MarkingVisitor<FixedArrayVisitationMode::kRegular,
331                    TraceRetainingPathMode::kEnabled,
332                    MarkCompactCollector::MarkingState>;
333 
334 namespace {
335 
336 int NumberOfAvailableCores() {
337   static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
338   // This number of cores should be greater than zero and never change.
339   DCHECK_GE(num_cores, 1);
340   DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
341   return num_cores;
342 }
343 
344 }  // namespace
345 
346 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
347   DCHECK_GT(pages, 0);
348   int tasks =
349       FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
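  // The check below budgets roughly one fresh old-generation page of
  // head-room per task; without that head-room we fall back to a single task.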
350   if (!heap_->CanExpandOldGeneration(
351           static_cast<size_t>(tasks * Page::kPageSize))) {
352     // Optimize for memory usage near the heap limit.
353     tasks = 1;
354   }
355   return tasks;
356 }
357 
358 int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
359                                                                  int slots) {
360   DCHECK_GT(pages, 0);
361   // Limit the number of update tasks as task creation often dominates the
362   // actual work that is being done.
363   const int kMaxPointerUpdateTasks = 8;
364   const int kSlotsPerTask = 600;
365   const int wanted_tasks =
366       (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
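  // For example, 8 pages with 3000 slots yield wanted_tasks =
  // max(1, min(8, 3000 / 600)) = 5 before the core-count and
  // kMaxPointerUpdateTasks caps below are applied.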
367   return FLAG_parallel_pointer_update
368              ? Min(kMaxPointerUpdateTasks,
369                    Min(NumberOfAvailableCores(), wanted_tasks))
370              : 1;
371 }
372 
373 int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
374     int pages) {
375   DCHECK_GT(pages, 0);
376   // No cap needed because all pages we need to process are fully filled with
377   // interesting objects.
378   return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
379                                       : 1;
380 }
381 
382 MarkCompactCollector::MarkCompactCollector(Heap* heap)
383     : MarkCompactCollectorBase(heap),
384       page_parallel_job_semaphore_(0),
385 #ifdef DEBUG
386       state_(IDLE),
387 #endif
388       was_marked_incrementally_(false),
389       evacuation_(false),
390       compacting_(false),
391       black_allocation_(false),
392       have_code_to_deoptimize_(false),
393       marking_worklist_(heap),
394       sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
395   old_to_new_slots_ = -1;
396 }
397 
398 MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
399 
400 void MarkCompactCollector::SetUp() {
401   DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
402   DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
403   DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
404   DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
405 }
406 
407 void MarkCompactCollector::TearDown() {
408   AbortCompaction();
409   AbortWeakObjects();
410   if (heap()->incremental_marking()->IsMarking()) {
411     marking_worklist()->Clear();
412   }
413 }
414 
415 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
416   DCHECK(!p->NeverEvacuate());
417   p->MarkEvacuationCandidate();
418   evacuation_candidates_.push_back(p);
419 }
420 
421 
422 static void TraceFragmentation(PagedSpace* space) {
423   int number_of_pages = space->CountTotalPages();
424   intptr_t reserved = (number_of_pages * space->AreaSize());
425   intptr_t free = reserved - space->SizeOfObjects();
426   PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
427          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
428 }
429 
430 bool MarkCompactCollector::StartCompaction() {
431   if (!compacting_) {
432     DCHECK(evacuation_candidates_.empty());
433 
434     CollectEvacuationCandidates(heap()->old_space());
435 
436     if (FLAG_compact_code_space) {
437       CollectEvacuationCandidates(heap()->code_space());
438     } else if (FLAG_trace_fragmentation) {
439       TraceFragmentation(heap()->code_space());
440     }
441 
442     if (FLAG_trace_fragmentation) {
443       TraceFragmentation(heap()->map_space());
444     }
445 
446     compacting_ = !evacuation_candidates_.empty();
447   }
448 
449   return compacting_;
450 }
451 
452 void MarkCompactCollector::CollectGarbage() {
453   // Make sure that Prepare() has been called. The individual steps below will
454   // update the state as they proceed.
455   DCHECK(state_ == PREPARE_GC);
456 
457 #ifdef ENABLE_MINOR_MC
458   heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
459 #endif  // ENABLE_MINOR_MC
460 
461   MarkLiveObjects();
462   ClearNonLiveReferences();
463   VerifyMarking();
464 
465   RecordObjectStats();
466 
467   StartSweepSpaces();
468 
469   Evacuate();
470 
471   Finish();
472 }
473 
474 #ifdef VERIFY_HEAP
475 void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
476   HeapObjectIterator iterator(space);
477   while (HeapObject* object = iterator.Next()) {
478     CHECK(non_atomic_marking_state()->IsBlack(object));
479   }
480 }
481 
482 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
483   for (Page* p : *space) {
484     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
485     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
486   }
487 }
488 
489 
490 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
491   for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
492     CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
493     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
494   }
495 }
496 
497 
498 void MarkCompactCollector::VerifyMarkbitsAreClean() {
499   VerifyMarkbitsAreClean(heap_->old_space());
500   VerifyMarkbitsAreClean(heap_->code_space());
501   VerifyMarkbitsAreClean(heap_->map_space());
502   VerifyMarkbitsAreClean(heap_->new_space());
503   // Read-only space should always be black since we never collect any objects
504   // in it or linked from it.
505   VerifyMarkbitsAreDirty(heap_->read_only_space());
506 
507   LargeObjectIterator it(heap_->lo_space());
508   for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
509     CHECK(non_atomic_marking_state()->IsWhite(obj));
510     CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
511                     MemoryChunk::FromAddress(obj->address())));
512   }
513 }
514 
515 #endif  // VERIFY_HEAP
516 
517 void MarkCompactCollector::ClearMarkbitsInPagedSpace(PagedSpace* space) {
518   for (Page* p : *space) {
519     non_atomic_marking_state()->ClearLiveness(p);
520   }
521 }
522 
523 void MarkCompactCollector::ClearMarkbitsInNewSpace(NewSpace* space) {
524   for (Page* p : *space) {
525     non_atomic_marking_state()->ClearLiveness(p);
526   }
527 }
528 
529 
530 void MarkCompactCollector::ClearMarkbits() {
531   ClearMarkbitsInPagedSpace(heap_->code_space());
532   ClearMarkbitsInPagedSpace(heap_->map_space());
533   ClearMarkbitsInPagedSpace(heap_->old_space());
534   ClearMarkbitsInNewSpace(heap_->new_space());
535   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
536 }
537 
538 void MarkCompactCollector::EnsureSweepingCompleted() {
539   if (!sweeper()->sweeping_in_progress()) return;
540 
541   sweeper()->EnsureCompleted();
542   heap()->old_space()->RefillFreeList();
543   heap()->code_space()->RefillFreeList();
544   heap()->map_space()->RefillFreeList();
545 
546 #ifdef VERIFY_HEAP
547   if (FLAG_verify_heap && !evacuation()) {
548     FullEvacuationVerifier verifier(heap());
549     verifier.Run();
550   }
551 #endif
552 }
553 
554 void MarkCompactCollector::ComputeEvacuationHeuristics(
555     size_t area_size, int* target_fragmentation_percent,
556     size_t* max_evacuated_bytes) {
557   // For the memory-reducing and optimize-for-memory modes we directly define
558   // both constants.
559   const int kTargetFragmentationPercentForReduceMemory = 20;
560   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
561   const int kTargetFragmentationPercentForOptimizeMemory = 20;
562   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
563 
564   // For regular mode (which is latency critical) we define less aggressive
565   // defaults to start and switch to a trace-based (using compaction speed)
566   // approach as soon as we have enough samples.
567   const int kTargetFragmentationPercent = 70;
568   const size_t kMaxEvacuatedBytes = 4 * MB;
569   // Target time for compacting a single area (= payload of a page). Used as
570   // soon as enough compaction speed samples exist.
571   const float kTargetMsPerArea = .5;
572 
573   if (heap()->ShouldReduceMemory()) {
574     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
575     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
576   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
577     *target_fragmentation_percent =
578         kTargetFragmentationPercentForOptimizeMemory;
579     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
580   } else {
581     const double estimated_compaction_speed =
582         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
583     if (estimated_compaction_speed != 0) {
584       // Estimate the target fragmentation based on traced compaction speed
585       // and a goal for a single page.
586       const double estimated_ms_per_area =
587           1 + area_size / estimated_compaction_speed;
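      // For example, if compacting one area is estimated to take 2 ms, the
      // target fragmentation becomes 100 - 100 * 0.5 / 2 = 75%.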
588       *target_fragmentation_percent = static_cast<int>(
589           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
590       if (*target_fragmentation_percent <
591           kTargetFragmentationPercentForReduceMemory) {
592         *target_fragmentation_percent =
593             kTargetFragmentationPercentForReduceMemory;
594       }
595     } else {
596       *target_fragmentation_percent = kTargetFragmentationPercent;
597     }
598     *max_evacuated_bytes = kMaxEvacuatedBytes;
599   }
600 }
601 
602 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
603   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
604 
605   int number_of_pages = space->CountTotalPages();
606   size_t area_size = space->AreaSize();
607 
608   // Pairs of (live_bytes_in_page, page).
609   typedef std::pair<size_t, Page*> LiveBytesPagePair;
610   std::vector<LiveBytesPagePair> pages;
611   pages.reserve(number_of_pages);
612 
613   DCHECK(!sweeping_in_progress());
614   Page* owner_of_linear_allocation_area =
615       space->top() == space->limit()
616           ? nullptr
617           : Page::FromAllocationAreaAddress(space->top());
618   for (Page* p : *space) {
619     if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
620         !p->CanAllocate())
621       continue;
622     // Invariant: Evacuation candidates are only created when marking is
623     // started. This means that sweeping has finished. Furthermore, at the end
624     // of a GC all evacuation candidates are cleared and their slot buffers are
625     // released.
626     CHECK(!p->IsEvacuationCandidate());
627     CHECK_NULL(p->slot_set<OLD_TO_OLD>());
628     CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
629     CHECK(p->SweepingDone());
630     DCHECK(p->area_size() == area_size);
631     pages.push_back(std::make_pair(p->allocated_bytes(), p));
632   }
633 
634   int candidate_count = 0;
635   size_t total_live_bytes = 0;
636 
637   const bool reduce_memory = heap()->ShouldReduceMemory();
638   if (FLAG_manual_evacuation_candidates_selection) {
639     for (size_t i = 0; i < pages.size(); i++) {
640       Page* p = pages[i].second;
641       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
642         candidate_count++;
643         total_live_bytes += pages[i].first;
644         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
645         AddEvacuationCandidate(p);
646       }
647     }
648   } else if (FLAG_stress_compaction_random) {
649     double fraction = isolate()->fuzzer_rng()->NextDouble();
650     size_t pages_to_mark_count =
651         static_cast<size_t>(fraction * (pages.size() + 1));
652     for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
653              pages.size(), pages_to_mark_count)) {
654       candidate_count++;
655       total_live_bytes += pages[i].first;
656       AddEvacuationCandidate(pages[i].second);
657     }
658   } else if (FLAG_stress_compaction) {
659     for (size_t i = 0; i < pages.size(); i++) {
660       Page* p = pages[i].second;
661       if (i % 2 == 0) {
662         candidate_count++;
663         total_live_bytes += pages[i].first;
664         AddEvacuationCandidate(p);
665       }
666     }
667   } else {
668     // The following approach determines the pages that should be evacuated.
669     //
670     // We use two conditions to decide whether a page qualifies as an evacuation
671     // candidate, or not:
674     // * Target fragmentation: How fragmented is a page, i.e., what is the
675     //   ratio between live bytes and the capacity of this page (= area).
676     // * Evacuation quota: A global quota determining how many bytes should be
677     //   compacted.
676     //
677     // The algorithm sorts all pages by live bytes and then iterates through
678     // them starting with the page with the most free memory, adding them to the
679     // set of evacuation candidates as long as both conditions (fragmentation
680     // and quota) hold.
681     size_t max_evacuated_bytes;
682     int target_fragmentation_percent;
683     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
684                                 &max_evacuated_bytes);
685 
686     const size_t free_bytes_threshold =
687         target_fragmentation_percent * (area_size / 100);
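    // For example, with a (hypothetical) 500 KB page area and a 70% target,
    // a page needs at least 350 KB of free space to become a candidate.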
688 
689     // Sort pages from the most free to the least free, then select
690     // the first n pages for evacuation such that:
691     // - the total size of evacuated objects does not exceed the specified
692     // limit.
693     // - fragmentation of (n+1)-th page does not exceed the specified limit.
694     std::sort(pages.begin(), pages.end(),
695               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
696                 return a.first < b.first;
697               });
698     for (size_t i = 0; i < pages.size(); i++) {
699       size_t live_bytes = pages[i].first;
700       DCHECK_GE(area_size, live_bytes);
701       size_t free_bytes = area_size - live_bytes;
702       if (FLAG_always_compact ||
703           ((free_bytes >= free_bytes_threshold) &&
704            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
705         candidate_count++;
706         total_live_bytes += live_bytes;
707       }
708       if (FLAG_trace_fragmentation_verbose) {
709         PrintIsolate(isolate(),
710                      "compaction-selection-page: space=%s free_bytes_page=%zu "
711                      "fragmentation_limit_kb=%" PRIuS
712                      " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
713                      "compaction_limit_kb=%zu\n",
714                      space->name(), free_bytes / KB, free_bytes_threshold / KB,
715                      target_fragmentation_percent, total_live_bytes / KB,
716                      max_evacuated_bytes / KB);
717       }
718     }
719     // How many pages we will allocate for the evacuated objects
720     // in the worst case: ceil(total_live_bytes / area_size)
721     int estimated_new_pages =
722         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
723     DCHECK_LE(estimated_new_pages, candidate_count);
724     int estimated_released_pages = candidate_count - estimated_new_pages;
725     // Avoid (compact -> expand) cycles.
726     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
727       candidate_count = 0;
728     }
729     for (int i = 0; i < candidate_count; i++) {
730       AddEvacuationCandidate(pages[i].second);
731     }
732   }
733 
734   if (FLAG_trace_fragmentation) {
735     PrintIsolate(isolate(),
736                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
737                  "total_live_bytes=%zu\n",
738                  space->name(), reduce_memory, candidate_count,
739                  total_live_bytes / KB);
740   }
741 }
742 
743 
744 void MarkCompactCollector::AbortCompaction() {
745   if (compacting_) {
746     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
747     for (Page* p : evacuation_candidates_) {
748       p->ClearEvacuationCandidate();
749     }
750     compacting_ = false;
751     evacuation_candidates_.clear();
752   }
753   DCHECK(evacuation_candidates_.empty());
754 }
755 
756 
757 void MarkCompactCollector::Prepare() {
758   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
759 
760 #ifdef DEBUG
761   DCHECK(state_ == IDLE);
762   state_ = PREPARE_GC;
763 #endif
764 
765   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
766 
767   // Instead of waiting we could also abort the sweeper threads here.
768   EnsureSweepingCompleted();
769 
770   if (heap()->incremental_marking()->IsSweeping()) {
771     heap()->incremental_marking()->Stop();
772   }
773 
774   heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
775 
776   // Clear marking bits if incremental marking is aborted.
777   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
778     heap()->incremental_marking()->Stop();
779     heap()->incremental_marking()->AbortBlackAllocation();
780     FinishConcurrentMarking(ConcurrentMarking::StopRequest::PREEMPT_TASKS);
781     heap()->incremental_marking()->Deactivate();
782     ClearMarkbits();
783     AbortWeakObjects();
784     AbortCompaction();
785     heap_->local_embedder_heap_tracer()->AbortTracing();
786     marking_worklist()->Clear();
787     was_marked_incrementally_ = false;
788   }
789 
790   if (!was_marked_incrementally_) {
791     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
792     heap_->local_embedder_heap_tracer()->TracePrologue();
793   }
794 
795   // Don't start compaction if we are in the middle of an incremental
796   // marking cycle. We did not collect any slots.
797   if (!FLAG_never_compact && !was_marked_incrementally_) {
798     StartCompaction();
799   }
800 
801   PagedSpaces spaces(heap());
802   for (PagedSpace* space = spaces.next(); space != nullptr;
803        space = spaces.next()) {
804     space->PrepareForMarkCompact();
805   }
806   heap()->account_external_memory_concurrently_freed();
807 
808 #ifdef VERIFY_HEAP
809   if (!was_marked_incrementally_ && FLAG_verify_heap) {
810     VerifyMarkbitsAreClean();
811   }
812 #endif
813 }
814 
815 void MarkCompactCollector::FinishConcurrentMarking(
816     ConcurrentMarking::StopRequest stop_request) {
817   if (FLAG_concurrent_marking) {
818     heap()->concurrent_marking()->Stop(stop_request);
819     heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
820   }
821 }
822 
823 void MarkCompactCollector::VerifyMarking() {
824   CHECK(marking_worklist()->IsEmpty());
825   DCHECK(heap_->incremental_marking()->IsStopped());
826 #ifdef VERIFY_HEAP
827   if (FLAG_verify_heap) {
828     FullMarkingVerifier verifier(heap());
829     verifier.Run();
830   }
831 #endif
832 #ifdef VERIFY_HEAP
833   if (FLAG_verify_heap) {
834     heap()->old_space()->VerifyLiveBytes();
835     heap()->map_space()->VerifyLiveBytes();
836     heap()->code_space()->VerifyLiveBytes();
837   }
838 #endif
839 }
840 
841 void MarkCompactCollector::Finish() {
842   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
843 
844 #ifdef DEBUG
845   heap()->VerifyCountersBeforeConcurrentSweeping();
846 #endif
847 
848   CHECK(weak_objects_.current_ephemerons.IsEmpty());
849   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
850   weak_objects_.next_ephemerons.Clear();
851 
852   sweeper()->StartSweeperTasks();
853   sweeper()->StartIterabilityTasks();
854 
855   // Clear the marking state of live large objects.
856   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
857 
858 #ifdef DEBUG
859   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
860   state_ = IDLE;
861 #endif
862   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
863 
864   // The stub caches are not traversed during GC; clear them to force
865   // their lazy re-initialization. This must be done after the
866   // GC, because it relies on the new address of certain old space
867   // objects (empty string, illegal builtin).
868   isolate()->load_stub_cache()->Clear();
869   isolate()->store_stub_cache()->Clear();
870 
871   if (have_code_to_deoptimize_) {
872     // Some code objects were marked for deoptimization during the GC.
873     Deoptimizer::DeoptimizeMarkedCode(isolate());
874     have_code_to_deoptimize_ = false;
875   }
876 }
877 
878 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
879  public:
880   explicit RootMarkingVisitor(MarkCompactCollector* collector)
881       : collector_(collector) {}
882 
883   void VisitRootPointer(Root root, const char* description, Object** p) final {
884     MarkObjectByPointer(root, p);
885   }
886 
887   void VisitRootPointers(Root root, const char* description, Object** start,
888                          Object** end) final {
889     for (Object** p = start; p < end; p++) MarkObjectByPointer(root, p);
890   }
891 
892  private:
893   V8_INLINE void MarkObjectByPointer(Root root, Object** p) {
894     if (!(*p)->IsHeapObject()) return;
895 
896     collector_->MarkRootObject(root, HeapObject::cast(*p));
897   }
898 
899   MarkCompactCollector* const collector_;
900 };
901 
902 // This visitor is used to visit the body of special objects held alive by
903 // other roots.
904 //
905 // It is currently used for
906 // - Code held alive by the top optimized frame. This code cannot be
907 //   deoptimized and thus has to be kept alive in an isolated way: it should
908 //   not keep alive other code objects reachable through the weak list, but it
909 //   should keep alive its embedded pointers (which would otherwise be dropped).
910 // - Prefix of the string table.
911 class MarkCompactCollector::CustomRootBodyMarkingVisitor final
912     : public ObjectVisitor {
913  public:
914   explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
915       : collector_(collector) {}
916 
917   void VisitPointer(HeapObject* host, Object** p) final {
918     MarkObject(host, *p);
919   }
920 
921   void VisitPointers(HeapObject* host, Object** start, Object** end) final {
922     for (Object** p = start; p < end; p++) {
923       DCHECK(!HasWeakHeapObjectTag(*p));
924       MarkObject(host, *p);
925     }
926   }
927 
928   void VisitPointers(HeapObject* host, MaybeObject** start,
929                      MaybeObject** end) final {
930     // At the moment, custom roots cannot contain weak pointers.
931     UNREACHABLE();
932   }
933 
934   // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
935 
936  private:
937   void MarkObject(HeapObject* host, Object* object) {
938     if (!object->IsHeapObject()) return;
939     collector_->MarkObject(host, HeapObject::cast(object));
940   }
941 
942   MarkCompactCollector* const collector_;
943 };
944 
945 class InternalizedStringTableCleaner : public ObjectVisitor {
946  public:
947   InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
948       : heap_(heap), pointers_removed_(0), table_(table) {}
949 
950   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
951     // Visit all HeapObject pointers in [start, end).
952     Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
953     MarkCompactCollector::NonAtomicMarkingState* marking_state =
954         heap_->mark_compact_collector()->non_atomic_marking_state();
955     for (Object** p = start; p < end; p++) {
956       Object* o = *p;
957       if (o->IsHeapObject()) {
958         HeapObject* heap_object = HeapObject::cast(o);
959         if (marking_state->IsWhite(heap_object)) {
960           pointers_removed_++;
961           // Set the entry to the_hole_value (as deleted).
962           *p = the_hole;
963         } else {
964           // StringTable contains only old space strings.
965           DCHECK(!Heap::InNewSpace(o));
966           MarkCompactCollector::RecordSlot(table_, p, heap_object);
967         }
968       }
969     }
970   }
971 
972   void VisitPointers(HeapObject* host, MaybeObject** start,
973                      MaybeObject** end) final {
974     UNREACHABLE();
975   }
976 
977   int PointersRemoved() {
978     return pointers_removed_;
979   }
980 
981  private:
982   Heap* heap_;
983   int pointers_removed_;
984   HeapObject* table_;
985 };
986 
987 class ExternalStringTableCleaner : public RootVisitor {
988  public:
989   explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
990 
991   void VisitRootPointers(Root root, const char* description, Object** start,
992                          Object** end) override {
993     // Visit all HeapObject pointers in [start, end).
994     MarkCompactCollector::NonAtomicMarkingState* marking_state =
995         heap_->mark_compact_collector()->non_atomic_marking_state();
996     Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
997     for (Object** p = start; p < end; p++) {
998       Object* o = *p;
999       if (o->IsHeapObject()) {
1000         HeapObject* heap_object = HeapObject::cast(o);
1001         if (marking_state->IsWhite(heap_object)) {
1002           if (o->IsExternalString()) {
1003             heap_->FinalizeExternalString(String::cast(*p));
1004           } else {
1005             // The original external string may have been internalized.
1006             DCHECK(o->IsThinString());
1007           }
1008           // Set the entry to the_hole_value (as deleted).
1009           *p = the_hole;
1010         }
1011       }
1012     }
1013   }
1014 
1015  private:
1016   Heap* heap_;
1017 };
1018 
1019 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1020 // are retained.
1021 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1022  public:
1023   explicit MarkCompactWeakObjectRetainer(
1024       MarkCompactCollector::NonAtomicMarkingState* marking_state)
1025       : marking_state_(marking_state) {}
1026 
1027   virtual Object* RetainAs(Object* object) {
1028     HeapObject* heap_object = HeapObject::cast(object);
1029     DCHECK(!marking_state_->IsGrey(heap_object));
1030     if (marking_state_->IsBlack(heap_object)) {
1031       return object;
1032     } else if (object->IsAllocationSite() &&
1033                !(AllocationSite::cast(object)->IsZombie())) {
1034       // "dead" AllocationSites need to live long enough for a traversal of new
1035       // space. These sites get a one-time reprieve.
1036 
1037       Object* nested = object;
1038       while (nested->IsAllocationSite()) {
1039         AllocationSite* current_site = AllocationSite::cast(nested);
1040         // MarkZombie will override the nested_site, so read it first
1041         // before marking.
1042         nested = current_site->nested_site();
1043         current_site->MarkZombie();
1044         marking_state_->WhiteToBlack(current_site);
1045       }
1046 
1047       return object;
1048     } else {
1049       return nullptr;
1050     }
1051   }
1052 
1053  private:
1054   MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1055 };
1056 
1057 class RecordMigratedSlotVisitor : public ObjectVisitor {
1058  public:
1059   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1060       : collector_(collector) {}
1061 
1062   inline void VisitPointer(HeapObject* host, Object** p) final {
1063     DCHECK(!HasWeakHeapObjectTag(*p));
1064     RecordMigratedSlot(host, reinterpret_cast<MaybeObject*>(*p),
1065                        reinterpret_cast<Address>(p));
1066   }
1067 
1068   inline void VisitPointer(HeapObject* host, MaybeObject** p) final {
1069     RecordMigratedSlot(host, *p, reinterpret_cast<Address>(p));
1070   }
1071 
1072   inline void VisitPointers(HeapObject* host, Object** start,
1073                             Object** end) final {
1074     while (start < end) {
1075       VisitPointer(host, start);
1076       ++start;
1077     }
1078   }
1079 
1080   inline void VisitPointers(HeapObject* host, MaybeObject** start,
1081                             MaybeObject** end) final {
1082     while (start < end) {
1083       VisitPointer(host, start);
1084       ++start;
1085     }
1086   }
1087 
1088   inline void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
1089     DCHECK_EQ(host, rinfo->host());
1090     DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1091     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1092     // The target is always in old space, so we don't have to record the
1093     // slot in the old-to-new remembered set.
1094     DCHECK(!Heap::InNewSpace(target));
1095     collector_->RecordRelocSlot(host, rinfo, target);
1096   }
1097 
1098   inline void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
1099     DCHECK_EQ(host, rinfo->host());
1100     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1101     HeapObject* object = HeapObject::cast(rinfo->target_object());
1102     GenerationalBarrierForCode(host, rinfo, object);
1103     collector_->RecordRelocSlot(host, rinfo, object);
1104   }
1105 
1106   // Entries that are skipped for recording.
1107   inline void VisitExternalReference(Code* host, RelocInfo* rinfo) final {}
1108   inline void VisitExternalReference(Foreign* host, Address* p) final {}
1109   inline void VisitRuntimeEntry(Code* host, RelocInfo* rinfo) final {}
1110   inline void VisitInternalReference(Code* host, RelocInfo* rinfo) final {}
1111 
1112  protected:
1113   inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
1114                                          Address slot) {
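    // Record the slot in the remembered set that matches where the referenced
    // value lives: OLD_TO_NEW for new-space targets, OLD_TO_OLD for targets on
    // evacuation candidates.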
1115     if (value->IsStrongOrWeakHeapObject()) {
1116       Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1117       if (p->InNewSpace()) {
1118         DCHECK_IMPLIES(p->InToSpace(),
1119                        p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
1120         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1121             Page::FromAddress(slot), slot);
1122       } else if (p->IsEvacuationCandidate()) {
1123         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1124             Page::FromAddress(slot), slot);
1125       }
1126     }
1127   }
1128 
1129   MarkCompactCollector* collector_;
1130 };
1131 
1132 class MigrationObserver {
1133  public:
1134   explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1135 
1136   virtual ~MigrationObserver() {}
1137   virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1138                     int size) = 0;
1139 
1140  protected:
1141   Heap* heap_;
1142 };
1143 
1144 class ProfilingMigrationObserver final : public MigrationObserver {
1145  public:
1146   explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1147 
1148   inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1149                    int size) final {
1150     if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
1151       PROFILE(heap_->isolate(),
1152               CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1153     }
1154     heap_->OnMoveEvent(dst, src, size);
1155   }
1156 };
1157 
1158 class HeapObjectVisitor {
1159  public:
1160   virtual ~HeapObjectVisitor() {}
1161   virtual bool Visit(HeapObject* object, int size) = 0;
1162 };
1163 
1164 class EvacuateVisitorBase : public HeapObjectVisitor {
1165  public:
1166   void AddObserver(MigrationObserver* observer) {
1167     migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1168     observers_.push_back(observer);
1169   }
1170 
1171  protected:
1172   enum MigrationMode { kFast, kObserved };
1173 
1174   typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
1175                                   HeapObject* src, int size,
1176                                   AllocationSpace dest);
1177 
1178   template <MigrationMode mode>
1179   static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
1180                                HeapObject* src, int size,
1181                                AllocationSpace dest) {
1182     Address dst_addr = dst->address();
1183     Address src_addr = src->address();
1184     DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
1185     DCHECK(dest != LO_SPACE);
1186     if (dest == OLD_SPACE) {
1187       DCHECK_OBJECT_SIZE(size);
1188       DCHECK(IsAligned(size, kPointerSize));
1189       base->heap_->CopyBlock(dst_addr, src_addr, size);
1190       if (mode != MigrationMode::kFast)
1191         base->ExecuteMigrationObservers(dest, src, dst, size);
1192       dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1193     } else if (dest == CODE_SPACE) {
1194       DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1195       base->heap_->CopyBlock(dst_addr, src_addr, size);
1196       Code::cast(dst)->Relocate(dst_addr - src_addr);
1197       if (mode != MigrationMode::kFast)
1198         base->ExecuteMigrationObservers(dest, src, dst, size);
1199       dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1200     } else {
1201       DCHECK_OBJECT_SIZE(size);
1202       DCHECK(dest == NEW_SPACE);
1203       base->heap_->CopyBlock(dst_addr, src_addr, size);
1204       if (mode != MigrationMode::kFast)
1205         base->ExecuteMigrationObservers(dest, src, dst, size);
1206     }
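    // Publish the forwarding pointer: overwrite the map word of the source
    // object with the destination address so later slot updates can find the
    // relocated object.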
1207     base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
1208                         static_cast<base::AtomicWord>(dst_addr));
1209   }
1210 
1211   EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
1212                       RecordMigratedSlotVisitor* record_visitor)
1213       : heap_(heap),
1214         local_allocator_(local_allocator),
1215         record_visitor_(record_visitor) {
1216     migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1217   }
1218 
1219   inline bool TryEvacuateObject(AllocationSpace target_space,
1220                                 HeapObject* object, int size,
1221                                 HeapObject** target_object) {
1222 #ifdef VERIFY_HEAP
1223     if (AbortCompactionForTesting(object)) return false;
1224 #endif  // VERIFY_HEAP
1225     AllocationAlignment alignment =
1226         HeapObject::RequiredAlignment(object->map());
1227     AllocationResult allocation =
1228         local_allocator_->Allocate(target_space, size, alignment);
1229     if (allocation.To(target_object)) {
1230       MigrateObject(*target_object, object, size, target_space);
1231       return true;
1232     }
1233     return false;
1234   }
1235 
1236   inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
1237                                         HeapObject* dst, int size) {
1238     for (MigrationObserver* obs : observers_) {
1239       obs->Move(dest, src, dst, size);
1240     }
1241   }
1242 
1243   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1244                             AllocationSpace dest) {
1245     migration_function_(this, dst, src, size, dest);
1246   }
1247 
1248 #ifdef VERIFY_HEAP
1249   bool AbortCompactionForTesting(HeapObject* object) {
1250     if (FLAG_stress_compaction) {
1251       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1252                              kPageAlignmentMask & ~kPointerAlignmentMask;
1253       if ((object->address() & kPageAlignmentMask) == mask) {
1254         Page* page = Page::FromAddress(object->address());
1255         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1256           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1257         } else {
1258           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1259           return true;
1260         }
1261       }
1262     }
1263     return false;
1264   }
1265 #endif  // VERIFY_HEAP
1266 
1267   Heap* heap_;
1268   LocalAllocator* local_allocator_;
1269   RecordMigratedSlotVisitor* record_visitor_;
1270   std::vector<MigrationObserver*> observers_;
1271   MigrateFunction migration_function_;
1272 };
1273 
1274 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1275  public:
1276   explicit EvacuateNewSpaceVisitor(
1277       Heap* heap, LocalAllocator* local_allocator,
1278       RecordMigratedSlotVisitor* record_visitor,
1279       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1280       : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1281         buffer_(LocalAllocationBuffer::InvalidBuffer()),
1282         promoted_size_(0),
1283         semispace_copied_size_(0),
1284         local_pretenuring_feedback_(local_pretenuring_feedback),
1285         is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
1286 
1287   inline bool Visit(HeapObject* object, int size) override {
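    // Evacuation order: first try the copy-free fast path, then promotion to
    // old space, and finally a regular semispace copy.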
1288     if (TryEvacuateWithoutCopy(object)) return true;
1289     HeapObject* target_object = nullptr;
1290     if (heap_->ShouldBePromoted(object->address()) &&
1291         TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1292       promoted_size_ += size;
1293       return true;
1294     }
1295     heap_->UpdateAllocationSite(object->map(), object,
1296                                 local_pretenuring_feedback_);
1297     HeapObject* target = nullptr;
1298     AllocationSpace space = AllocateTargetObject(object, size, &target);
1299     MigrateObject(HeapObject::cast(target), object, size, space);
1300     semispace_copied_size_ += size;
1301     return true;
1302   }
1303 
1304   intptr_t promoted_size() { return promoted_size_; }
1305   intptr_t semispace_copied_size() { return semispace_copied_size_; }
1306 
1307  private:
1308   inline bool TryEvacuateWithoutCopy(HeapObject* object) {
1309     if (is_incremental_marking_) return false;
1310 
1311     Map* map = object->map();
1312 
1313     // Some objects can be evacuated without creating a copy.
1314     if (map->visitor_id() == kVisitThinString) {
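      // A ThinString is only a forwarding wrapper around its actual string, so
      // instead of copying it we overwrite it with a forwarding pointer to the
      // actual string.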
1315       HeapObject* actual = ThinString::cast(object)->unchecked_actual();
1316       if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1317       base::Relaxed_Store(
1318           reinterpret_cast<base::AtomicWord*>(object->address()),
1319           reinterpret_cast<base::AtomicWord>(
1320               MapWord::FromForwardingAddress(actual).ToMap()));
1321       return true;
1322     }
1323     // TODO(mlippautz): Handle ConsString.
1324 
1325     return false;
1326   }
1327 
1328   inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
1329                                               HeapObject** target_object) {
1330     AllocationAlignment alignment =
1331         HeapObject::RequiredAlignment(old_object->map());
1332     AllocationSpace space_allocated_in = NEW_SPACE;
1333     AllocationResult allocation =
1334         local_allocator_->Allocate(NEW_SPACE, size, alignment);
1335     if (allocation.IsRetry()) {
1336       allocation = AllocateInOldSpace(size, alignment);
1337       space_allocated_in = OLD_SPACE;
1338     }
1339     bool ok = allocation.To(target_object);
1340     DCHECK(ok);
1341     USE(ok);
1342     return space_allocated_in;
1343   }
1344 
1345   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1346                                              AllocationAlignment alignment) {
1347     AllocationResult allocation =
1348         local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1349     if (allocation.IsRetry()) {
1350       heap_->FatalProcessOutOfMemory(
1351           "MarkCompactCollector: semi-space copy, fallback in old gen");
1352     }
1353     return allocation;
1354   }
1355 
1356   LocalAllocationBuffer buffer_;
1357   intptr_t promoted_size_;
1358   intptr_t semispace_copied_size_;
1359   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1360   bool is_incremental_marking_;
1361 };
1362 
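// Instead of copying individual objects, an entire new-space page can be
// "moved": NEW_TO_OLD converts the page into an old-space page, NEW_TO_NEW
// moves it from from-space into to-space. The per-object Visit below then only
// has to update pretenuring feedback (NEW_TO_NEW) or re-record slots via the
// migration visitor (NEW_TO_OLD); no object is relocated.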
1363 template <PageEvacuationMode mode>
1364 class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1365  public:
1366   explicit EvacuateNewSpacePageVisitor(
1367       Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1368       Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1369       : heap_(heap),
1370         record_visitor_(record_visitor),
1371         moved_bytes_(0),
1372         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1373 
1374   static void Move(Page* page) {
1375     switch (mode) {
1376       case NEW_TO_NEW:
1377         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1378         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1379         break;
1380       case NEW_TO_OLD: {
1381         page->heap()->new_space()->from_space().RemovePage(page);
1382         Page* new_page = Page::ConvertNewToOld(page);
1383         DCHECK(!new_page->InNewSpace());
1384         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1385         break;
1386       }
1387     }
1388   }
1389 
1390   inline bool Visit(HeapObject* object, int size) {
1391     if (mode == NEW_TO_NEW) {
1392       heap_->UpdateAllocationSite(object->map(), object,
1393                                   local_pretenuring_feedback_);
1394     } else if (mode == NEW_TO_OLD) {
1395       object->IterateBodyFast(record_visitor_);
1396     }
1397     return true;
1398   }
1399 
1400   intptr_t moved_bytes() { return moved_bytes_; }
1401   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1402 
1403  private:
1404   Heap* heap_;
1405   RecordMigratedSlotVisitor* record_visitor_;
1406   intptr_t moved_bytes_;
1407   Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1408 };
1409 
1410 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1411  public:
1412   EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
1413                           RecordMigratedSlotVisitor* record_visitor)
1414       : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1415 
1416   inline bool Visit(HeapObject* object, int size) override {
1417     HeapObject* target_object = nullptr;
1418     if (TryEvacuateObject(
1419             Page::FromAddress(object->address())->owner()->identity(), object,
1420             size, &target_object)) {
1421       DCHECK(object->map_word().IsForwardingAddress());
1422       return true;
1423     }
1424     return false;
1425   }
1426 };
1427 
1428 class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1429  public:
1430   explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1431 
1432   inline bool Visit(HeapObject* object, int size) {
1433     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1434     object->IterateBodyFast(&visitor);
1435     return true;
1436   }
1437 
1438  private:
1439   Heap* heap_;
1440 };
1441 
1442 bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, Object** p) {
1443   Object* o = *p;
1444   if (!o->IsHeapObject()) return false;
1445   HeapObject* heap_object = HeapObject::cast(o);
1446   return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1447       heap_object);
1448 }
1449 
1450 void MarkCompactCollector::MarkStringTable(
1451     ObjectVisitor* custom_root_body_visitor) {
1452   StringTable* string_table = heap()->string_table();
1453   // Mark the string table itself.
1454   if (marking_state()->WhiteToBlack(string_table)) {
1455     // Explicitly mark the prefix.
1456     string_table->IteratePrefix(custom_root_body_visitor);
1457   }
1458 }
1459 
1460 void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1461                                      ObjectVisitor* custom_root_body_visitor) {
1462   // Mark the heap roots including global variables, stack variables,
1463   // etc., and all objects reachable from them.
1464   heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
1465 
1466   // Custom marking for string table and top optimized frame.
1467   MarkStringTable(custom_root_body_visitor);
1468   ProcessTopOptimizedFrame(custom_root_body_visitor);
1469 }
1470 
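// An ephemeron's value must be kept alive only if its key is alive, so marking
// through ephemerons requires a fixpoint iteration: each round drains the
// ephemeron worklists and the marking worklist, and another round is needed
// whenever that marked something new. In sketch form:
//
//   while (work_to_do) {
//     if (iterations >= FLAG_ephemeron_fixpoint_iterations) {
//       ProcessEphemeronsLinear();  // give up on the fixpoint
//       break;
//     }
//     work_to_do = ProcessEphemerons() || more marking work was discovered;
//     ++iterations;
//   }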
1471 void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
1472   bool work_to_do = true;
1473   int iterations = 0;
1474   int max_iterations = FLAG_ephemeron_fixpoint_iterations;
1475 
1476   while (work_to_do) {
1477     PerformWrapperTracing();
1478 
1479     if (iterations >= max_iterations) {
1480       // Give up fixpoint iteration and switch to linear algorithm.
1481       ProcessEphemeronsLinear();
1482       break;
1483     }
1484 
1485     // Move ephemerons from next_ephemerons into current_ephemerons to
1486     // drain them in this iteration.
1487     weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1488     heap()->concurrent_marking()->set_ephemeron_marked(false);
1489 
1490     {
1491       TRACE_GC(heap()->tracer(),
1492                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1493 
1494       if (FLAG_parallel_marking) {
1495         DCHECK(FLAG_concurrent_marking);
1496         heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1497       }
1498 
1499       work_to_do = ProcessEphemerons();
1500       FinishConcurrentMarking(
1501           ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1502     }
1503 
1504     CHECK(weak_objects_.current_ephemerons.IsEmpty());
1505     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1506 
1507     work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
1508                  heap()->concurrent_marking()->ephemeron_marked() ||
1509                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1510     ++iterations;
1511   }
1512 
1513   CHECK(marking_worklist()->IsEmpty());
1514   CHECK(weak_objects_.current_ephemerons.IsEmpty());
1515   CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1516 }
1517 
1518 bool MarkCompactCollector::ProcessEphemerons() {
1519   Ephemeron ephemeron;
1520   bool ephemeron_marked = false;
1521 
1522   // Drain current_ephemerons and push ephemerons where key and value are still
1523   // unreachable into next_ephemerons.
1524   while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1525     if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1526       ephemeron_marked = true;
1527     }
1528   }
1529 
1530   // Drain marking worklist and push discovered ephemerons into
1531   // discovered_ephemerons.
1532   ProcessMarkingWorklist();
1533 
1534   // Drain discovered_ephemerons (filled while draining the marking worklist
1535   // above) and push ephemerons whose key and value are still unreachable into
1536   // next_ephemerons.
1537   while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1538     if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1539       ephemeron_marked = true;
1540     }
1541   }
1542 
1543   // Flush local ephemerons for main task to global pool.
1544   weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
1545   weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1546 
1547   return ephemeron_marked;
1548 }
1549 
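// Fallback for the rare case that the fixpoint above does not converge within
// the iteration budget. The linear algorithm remembers, for every ephemeron
// whose value is still white, a key -> value entry in a multimap; newly
// discovered (marked) objects are then looked up as keys so that their values
// can be marked directly. If the newly_discovered buffer overflows, all
// ephemerons in next_ephemerons are conservatively revisited instead.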
1550 void MarkCompactCollector::ProcessEphemeronsLinear() {
1551   TRACE_GC(heap()->tracer(),
1552            GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
1553   CHECK(heap()->concurrent_marking()->IsStopped());
1554   std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
1555   Ephemeron ephemeron;
1556 
1557   DCHECK(weak_objects_.current_ephemerons.IsEmpty());
1558   weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1559 
1560   while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1561     VisitEphemeron(ephemeron.key, ephemeron.value);
1562 
1563     if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1564       key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1565     }
1566   }
1567 
1568   ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1569   bool work_to_do = true;
1570 
1571   while (work_to_do) {
1572     PerformWrapperTracing();
1573 
1574     ResetNewlyDiscovered();
1575     ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1576 
1577     {
1578       TRACE_GC(heap()->tracer(),
1579                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1580       // Drain marking worklist and push all discovered objects into
1581       // newly_discovered.
1582       ProcessMarkingWorklistInternal<
1583           MarkCompactCollector::MarkingWorklistProcessingMode::
1584               kTrackNewlyDiscoveredObjects>();
1585     }
1586 
1587     while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1588       VisitEphemeron(ephemeron.key, ephemeron.value);
1589 
1590       if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1591         key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1592       }
1593     }
1594 
1595     if (ephemeron_marking_.newly_discovered_overflowed) {
1596       // If newly_discovered was overflowed just visit all ephemerons in
1597       // next_ephemerons.
1598       weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
1599         if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
1600             non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
1601           marking_worklist()->Push(ephemeron.value);
1602         }
1603       });
1604 
1605     } else {
1606       // This is the good case: newly_discovered stores all discovered
1607       // objects. Now use key_to_values to see if discovered objects keep more
1608       // objects alive due to ephemeron semantics.
1609       for (HeapObject* object : ephemeron_marking_.newly_discovered) {
1610         auto range = key_to_values.equal_range(object);
1611         for (auto it = range.first; it != range.second; ++it) {
1612           HeapObject* value = it->second;
1613           MarkObject(object, value);
1614         }
1615       }
1616     }
1617 
1618     // Do NOT drain marking worklist here, otherwise the current checks
1619     // for work_to_do are not sufficient for determining if another iteration
1620     // is necessary.
1621 
1622     work_to_do = !marking_worklist()->IsEmpty() ||
1623                  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1624     CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1625   }
1626 
1627   ResetNewlyDiscovered();
1628   ephemeron_marking_.newly_discovered.shrink_to_fit();
1629 
1630   CHECK(marking_worklist()->IsEmpty());
1631 }
1632 
1633 void MarkCompactCollector::PerformWrapperTracing() {
1634   if (heap_->local_embedder_heap_tracer()->InUse()) {
1635     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
1636     heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
1637     heap_->local_embedder_heap_tracer()->Trace(
1638         std::numeric_limits<double>::infinity());
1639   }
1640 }
1641 
1642 void MarkCompactCollector::ProcessMarkingWorklist() {
1643   ProcessMarkingWorklistInternal<
1644       MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
1645 }
1646 
1647 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
1648 void MarkCompactCollector::ProcessMarkingWorklistInternal() {
1649   HeapObject* object;
1650   MarkCompactMarkingVisitor visitor(this, marking_state());
1651   while ((object = marking_worklist()->Pop()) != nullptr) {
1652     DCHECK(!object->IsFiller());
1653     DCHECK(object->IsHeapObject());
1654     DCHECK(heap()->Contains(object));
1655     DCHECK(!(marking_state()->IsWhite(object)));
1656     marking_state()->GreyToBlack(object);
1657     if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
1658                     kTrackNewlyDiscoveredObjects) {
1659       AddNewlyDiscovered(object);
1660     }
1661     Map* map = object->map();
1662     MarkObject(object, map);
1663     visitor.Visit(map, object);
1664   }
1665   DCHECK(marking_worklist()->IsBailoutEmpty());
1666 }
1667 
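// Marks the value of an ephemeron grey if its key is already marked and
// returns whether new marking work was produced; otherwise, if the value is
// still white, the ephemeron is remembered in next_ephemerons for a later
// iteration.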
1668 bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
1669   if (marking_state()->IsBlackOrGrey(key)) {
1670     if (marking_state()->WhiteToGrey(value)) {
1671       marking_worklist()->Push(value);
1672       return true;
1673     }
1674 
1675   } else if (marking_state()->IsWhite(value)) {
1676     weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
1677   }
1678 
1679   return false;
1680 }
1681 
1682 void MarkCompactCollector::ProcessEphemeronMarking() {
1683   DCHECK(marking_worklist()->IsEmpty());
1684 
1685   // Incremental marking might leave ephemerons in main task's local
1686   // buffer, flush it into global pool.
1687   weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1688 
1689   ProcessEphemeronsUntilFixpoint();
1690 
1691   CHECK(marking_worklist()->IsEmpty());
1692   CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
1693 }
1694 
1695 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1696   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1697        !it.done(); it.Advance()) {
1698     if (it.frame()->type() == StackFrame::INTERPRETED) {
1699       return;
1700     }
1701     if (it.frame()->type() == StackFrame::OPTIMIZED) {
1702       Code* code = it.frame()->LookupCode();
1703       if (!code->CanDeoptAt(it.frame()->pc())) {
1704         Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
1705       }
1706       return;
1707     }
1708   }
1709 }
1710 
1711 void MarkCompactCollector::RecordObjectStats() {
1712   if (V8_UNLIKELY(FLAG_gc_stats)) {
1713     heap()->CreateObjectStats();
1714     ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
1715                                    heap()->dead_object_stats_);
1716     collector.Collect();
1717     if (V8_UNLIKELY(FLAG_gc_stats &
1718                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
1719       std::stringstream live, dead;
1720       heap()->live_object_stats_->Dump(live);
1721       heap()->dead_object_stats_->Dump(dead);
1722       TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
1723                            "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
1724                            "live", TRACE_STR_COPY(live.str().c_str()), "dead",
1725                            TRACE_STR_COPY(dead.str().c_str()));
1726     }
1727     if (FLAG_trace_gc_object_stats) {
1728       heap()->live_object_stats_->PrintJSON("live");
1729       heap()->dead_object_stats_->PrintJSON("dead");
1730     }
1731     heap()->live_object_stats_->CheckpointObjectStats();
1732     heap()->dead_object_stats_->ClearObjectStats();
1733   }
1734 }
1735 
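// Full marking proceeds in phases: finalize (or verify stopped) incremental
// marking, mark the strong roots, drain the marking worklist together with the
// concurrent markers, and then build the weak closure (embedder tracing,
// ephemerons, weak global handles, and finalizers) before phantom handles are
// processed.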
1736 void MarkCompactCollector::MarkLiveObjects() {
1737   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
1738   // The recursive GC marker detects when it is nearing stack overflow,
1739   // and switches to a different marking system.  JS interrupts interfere
1740   // with the C stack limit check.
1741   PostponeInterruptsScope postpone(isolate());
1742 
1743   {
1744     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1745     IncrementalMarking* incremental_marking = heap_->incremental_marking();
1746     if (was_marked_incrementally_) {
1747       incremental_marking->Finalize();
1748     } else {
1749       CHECK(incremental_marking->IsStopped());
1750     }
1751   }
1752 
1753 #ifdef DEBUG
1754   DCHECK(state_ == PREPARE_GC);
1755   state_ = MARK_LIVE_OBJECTS;
1756 #endif
1757 
1758   heap_->local_embedder_heap_tracer()->EnterFinalPause();
1759 
1760   RootMarkingVisitor root_visitor(this);
1761 
1762   {
1763     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
1764     CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1765     MarkRoots(&root_visitor, &custom_root_body_visitor);
1766   }
1767 
1768   {
1769     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
1770     if (FLAG_parallel_marking) {
1771       DCHECK(FLAG_concurrent_marking);
1772       heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1773     }
1774     ProcessMarkingWorklist();
1775 
1776     FinishConcurrentMarking(
1777         ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1778     ProcessMarkingWorklist();
1779   }
1780 
1781   {
1782     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
1783 
1784     DCHECK(marking_worklist()->IsEmpty());
1785 
1786     // Mark objects reachable through the embedder heap. This phase is
1787     // opportunistic as it may not discover graphs that are only reachable
1788     // through ephemerons.
1789     {
1790       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPERS);
1791       while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone()) {
1792         PerformWrapperTracing();
1793         ProcessMarkingWorklist();
1794       }
1795       DCHECK(marking_worklist()->IsEmpty());
1796     }
1797 
1798     // The objects reachable from the roots are marked, yet unreachable objects
1799     // are unmarked. Mark objects reachable due to embedder heap tracing or
1800     // harmony weak maps.
1801     {
1802       TRACE_GC(heap()->tracer(),
1803                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
1804       ProcessEphemeronMarking();
1805       DCHECK(marking_worklist()->IsEmpty());
1806     }
1807 
1808     // The objects reachable from the roots, weak maps, and embedder heap
1809     // tracing are marked. Objects pointed to only by weak global handles cannot
1810     // be immediately reclaimed. Instead, we have to mark them as pending and
1811     // mark objects reachable from them.
1812     //
1813     // First we identify nonlive weak handles and mark them as pending
1814     // destruction.
1815     {
1816       TRACE_GC(heap()->tracer(),
1817                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
1818       heap()->isolate()->global_handles()->IdentifyWeakHandles(
1819           &IsUnmarkedHeapObject);
1820       ProcessMarkingWorklist();
1821     }
1822 
1823     // Process finalizers, effectively keeping them alive until the next
1824     // garbage collection.
1825     {
1826       TRACE_GC(heap()->tracer(),
1827                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
1828       heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
1829           &root_visitor);
1830       ProcessMarkingWorklist();
1831     }
1832 
1833     // Repeat ephemeron processing from the newly marked objects.
1834     {
1835       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
1836       ProcessEphemeronMarking();
1837       {
1838         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
1839         heap()->local_embedder_heap_tracer()->TraceEpilogue();
1840       }
1841       DCHECK(marking_worklist()->IsEmpty());
1842     }
1843 
1844     {
1845       heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
1846           &IsUnmarkedHeapObject);
1847     }
1848   }
1849 
1850   if (was_marked_incrementally_) {
1851     heap()->incremental_marking()->Deactivate();
1852   }
1853 }
1854 
1855 void MarkCompactCollector::ClearNonLiveReferences() {
1856   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
1857 
1858   {
1859     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
1860 
1861     // Prune the string table removing all strings only pointed to by the
1862     // string table.  Cannot use string_table() here because the string
1863     // table is marked.
1864     StringTable* string_table = heap()->string_table();
1865     InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
1866     string_table->IterateElements(&internalized_visitor);
1867     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
1868 
1869     ExternalStringTableCleaner external_visitor(heap());
1870     heap()->external_string_table_.IterateAll(&external_visitor);
1871     heap()->external_string_table_.CleanUpAll();
1872   }
1873 
1874   {
1875     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
1876     // Process the weak references.
1877     MarkCompactWeakObjectRetainer mark_compact_object_retainer(
1878         non_atomic_marking_state());
1879     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
1880   }
1881 
1882   {
1883     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
1884     // ClearFullMapTransitions must be called before weak references are
1885     // cleared.
1886     ClearFullMapTransitions();
1887   }
1888   ClearWeakReferences();
1889   MarkDependentCodeForDeoptimization();
1890 
1891   ClearWeakCollections();
1892 
1893   DCHECK(weak_objects_.transition_arrays.IsEmpty());
1894   DCHECK(weak_objects_.weak_references.IsEmpty());
1895   DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
1896 }
1897 
1898 void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
1899   std::pair<HeapObject*, Code*> weak_object_in_code;
1900   while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
1901                                                 &weak_object_in_code)) {
1902     HeapObject* object = weak_object_in_code.first;
1903     Code* code = weak_object_in_code.second;
1904     if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
1905         !code->marked_for_deoptimization()) {
1906       code->SetMarkedForDeoptimization("weak objects");
1907       code->InvalidateEmbeddedObjects(heap_);
1908       have_code_to_deoptimize_ = true;
1909     }
1910   }
1911 }
1912 
1913 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* dead_target) {
1914   DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
1915   Object* potential_parent = dead_target->constructor_or_backpointer();
1916   if (potential_parent->IsMap()) {
1917     Map* parent = Map::cast(potential_parent);
1918     DisallowHeapAllocation no_gc_obviously;
1919     if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
1920         TransitionsAccessor(isolate(), parent, &no_gc_obviously)
1921             .HasSimpleTransitionTo(dead_target)) {
1922       ClearPotentialSimpleMapTransition(parent, dead_target);
1923     }
1924   }
1925 }
1926 
1927 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map* map,
1928                                                              Map* dead_target) {
1929   DCHECK(!map->is_prototype_map());
1930   DCHECK(!dead_target->is_prototype_map());
1931   DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
1932   // Take ownership of the descriptor array.
1933   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
1934   DescriptorArray* descriptors = map->instance_descriptors();
1935   if (descriptors == dead_target->instance_descriptors() &&
1936       number_of_own_descriptors > 0) {
1937     TrimDescriptorArray(map, descriptors);
1938     DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
1939   }
1940 }
1941 
1942 void MarkCompactCollector::ClearFullMapTransitions() {
1943   TransitionArray* array;
1944   while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
1945     int num_transitions = array->number_of_entries();
1946     if (num_transitions > 0) {
1947       Map* map;
1948       // The array might contain "undefined" elements because it's not yet
1949       // filled. Allow it.
1950       if (array->GetTargetIfExists(0, isolate(), &map)) {
1951         DCHECK_NOT_NULL(map);  // Weak pointers aren't cleared yet.
1952         Map* parent = Map::cast(map->constructor_or_backpointer());
1953         bool parent_is_alive =
1954             non_atomic_marking_state()->IsBlackOrGrey(parent);
1955         DescriptorArray* descriptors =
1956             parent_is_alive ? parent->instance_descriptors() : nullptr;
1957         bool descriptors_owner_died =
1958             CompactTransitionArray(parent, array, descriptors);
1959         if (descriptors_owner_died) {
1960           TrimDescriptorArray(parent, descriptors);
1961         }
1962       }
1963     }
1964   }
1965 }
1966 
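// Compacts all live transitions of |map| to the front of |transitions|,
// re-recording slots for the moved key/target entries, and right-trims the
// dead tail. Returns true if one of the dead transition targets shared
// |descriptors| with |map|, in which case the caller trims the descriptor
// array as well.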
1967 bool MarkCompactCollector::CompactTransitionArray(
1968     Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
1969   DCHECK(!map->is_prototype_map());
1970   int num_transitions = transitions->number_of_entries();
1971   bool descriptors_owner_died = false;
1972   int transition_index = 0;
1973   // Compact all live transitions to the left.
1974   for (int i = 0; i < num_transitions; ++i) {
1975     Map* target = transitions->GetTarget(i);
1976     DCHECK_EQ(target->constructor_or_backpointer(), map);
1977     if (non_atomic_marking_state()->IsWhite(target)) {
1978       if (descriptors != nullptr &&
1979           target->instance_descriptors() == descriptors) {
1980         DCHECK(!target->is_prototype_map());
1981         descriptors_owner_died = true;
1982       }
1983     } else {
1984       if (i != transition_index) {
1985         Name* key = transitions->GetKey(i);
1986         transitions->SetKey(transition_index, key);
1987         HeapObjectReference** key_slot =
1988             transitions->GetKeySlot(transition_index);
1989         RecordSlot(transitions, key_slot, key);
1990         MaybeObject* raw_target = transitions->GetRawTarget(i);
1991         transitions->SetRawTarget(transition_index, raw_target);
1992         HeapObjectReference** target_slot =
1993             transitions->GetTargetSlot(transition_index);
1994         RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
1995       }
1996       transition_index++;
1997     }
1998   }
1999   // If there are no transitions to be cleared, return.
2000   if (transition_index == num_transitions) {
2001     DCHECK(!descriptors_owner_died);
2002     return false;
2003   }
2004   // Note that we never eliminate a transition array, though we might right-trim
2005   // such that number_of_transitions() == 0. If this assumption changes,
2006   // TransitionArray::Insert() will need to deal with the case that a transition
2007   // array disappeared during GC.
2008   int trim = transitions->Capacity() - transition_index;
2009   if (trim > 0) {
2010     heap_->RightTrimWeakFixedArray(transitions,
2011                                    trim * TransitionArray::kEntrySize);
2012     transitions->SetNumberOfTransitions(transition_index);
2013   }
2014   return descriptors_owner_died;
2015 }
2016 
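// Shrinks a shared descriptor array down to the descriptors that |map| still
// owns, re-sorts it, trims the enum cache and, with unboxed double fields, the
// layout descriptor to match.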
2017 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2018                                                DescriptorArray* descriptors) {
2019   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2020   if (number_of_own_descriptors == 0) {
2021     DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2022     return;
2023   }
2024 
2025   int number_of_descriptors = descriptors->number_of_descriptors_storage();
2026   int to_trim = number_of_descriptors - number_of_own_descriptors;
2027   if (to_trim > 0) {
2028     heap_->RightTrimWeakFixedArray(descriptors,
2029                                    to_trim * DescriptorArray::kEntrySize);
2030     descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2031 
2032     TrimEnumCache(map, descriptors);
2033     descriptors->Sort();
2034 
2035     if (FLAG_unbox_double_fields) {
2036       LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2037       layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2038                                                   number_of_own_descriptors);
2039       SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2040     }
2041   }
2042   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2043   map->set_owns_descriptors(true);
2044 }
2045 
2046 void MarkCompactCollector::TrimEnumCache(Map* map,
2047                                          DescriptorArray* descriptors) {
2048   int live_enum = map->EnumLength();
2049   if (live_enum == kInvalidEnumCacheSentinel) {
2050     live_enum = map->NumberOfEnumerableProperties();
2051   }
2052   if (live_enum == 0) return descriptors->ClearEnumCache();
2053   EnumCache* enum_cache = descriptors->GetEnumCache();
2054 
2055   FixedArray* keys = enum_cache->keys();
2056   int to_trim = keys->length() - live_enum;
2057   if (to_trim <= 0) return;
2058   heap_->RightTrimFixedArray(keys, to_trim);
2059 
2060   FixedArray* indices = enum_cache->indices();
2061   to_trim = indices->length() - live_enum;
2062   if (to_trim <= 0) return;
2063   heap_->RightTrimFixedArray(indices, to_trim);
2064 }
2065 
2066 void MarkCompactCollector::ClearWeakCollections() {
2067   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2068   EphemeronHashTable* table;
2069 
2070   while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
2071     for (int i = 0; i < table->Capacity(); i++) {
2072       HeapObject* key = HeapObject::cast(table->KeyAt(i));
2073 #ifdef VERIFY_HEAP
2074       Object* value = table->ValueAt(i);
2075 
2076       if (value->IsHeapObject()) {
2077         CHECK_IMPLIES(
2078             non_atomic_marking_state()->IsBlackOrGrey(key),
2079             non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
2080       }
2081 #endif
2082       if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2083         table->RemoveEntry(i);
2084       }
2085     }
2086   }
2087 }
2088 
2089 void MarkCompactCollector::ClearWeakReferences() {
2090   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2091   std::pair<HeapObject*, HeapObjectReference**> slot;
2092   while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
2093     HeapObject* value;
2094     HeapObjectReference** location = slot.second;
2095     if ((*location)->ToWeakHeapObject(&value)) {
2096       DCHECK(!value->IsCell());
2097       if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2098         // The value of the weak reference is alive.
2099         RecordSlot(slot.first, location, value);
2100       } else {
2101         if (value->IsMap()) {
2102           // The map is non-live.
2103           ClearPotentialSimpleMapTransition(Map::cast(value));
2104         }
2105         *location = HeapObjectReference::ClearedValue();
2106       }
2107     }
2108   }
2109 }
2110 
2111 void MarkCompactCollector::AbortWeakObjects() {
2112   weak_objects_.transition_arrays.Clear();
2113   weak_objects_.ephemeron_hash_tables.Clear();
2114   weak_objects_.current_ephemerons.Clear();
2115   weak_objects_.next_ephemerons.Clear();
2116   weak_objects_.discovered_ephemerons.Clear();
2117   weak_objects_.weak_references.Clear();
2118   weak_objects_.weak_objects_in_code.Clear();
2119 }
2120 
2121 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2122                                            Object* target) {
2123   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2124   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2125   if (target_page->IsEvacuationCandidate() &&
2126       (rinfo->host() == nullptr ||
2127        !source_page->ShouldSkipEvacuationSlotRecording())) {
2128     RelocInfo::Mode rmode = rinfo->rmode();
2129     Address addr = rinfo->pc();
2130     SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2131     if (rinfo->IsInConstantPool()) {
2132       addr = rinfo->constant_pool_entry_address();
2133       if (RelocInfo::IsCodeTargetMode(rmode)) {
2134         slot_type = CODE_ENTRY_SLOT;
2135       } else {
2136         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2137         slot_type = OBJECT_SLOT;
2138       }
2139     }
2140     RememberedSet<OLD_TO_OLD>::InsertTyped(
2141         source_page, reinterpret_cast<Address>(host), slot_type, addr);
2142   }
2143 }
2144 
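// After evacuation a slot may still point at the old copy of an object, whose
// map word has been replaced by a forwarding address. UpdateSlot follows the
// forwarding address, preserving the strong/weak nature of the reference, and
// writes the slot back either directly (NON_ATOMIC) or via compare-and-swap
// (ATOMIC) so that concurrent updaters cannot overwrite each other's result.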
2145 template <AccessMode access_mode>
2146 static inline SlotCallbackResult UpdateSlot(
2147     MaybeObject** slot, MaybeObject* old, HeapObject* heap_obj,
2148     HeapObjectReferenceType reference_type) {
2149   MapWord map_word = heap_obj->map_word();
2150   if (map_word.IsForwardingAddress()) {
2151     DCHECK(Heap::InFromSpace(heap_obj) ||
2152            MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2153            Page::FromAddress(heap_obj->address())
2154                ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2155     MaybeObject* target =
2156         reference_type == HeapObjectReferenceType::WEAK
2157             ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
2158             : HeapObjectReference::Strong(map_word.ToForwardingAddress());
2159     if (access_mode == AccessMode::NON_ATOMIC) {
2160       *slot = target;
2161     } else {
2162       base::AsAtomicPointer::Release_CompareAndSwap(slot, old, target);
2163     }
2164     DCHECK(!Heap::InFromSpace(target));
2165     DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2166   } else {
2167     DCHECK(heap_obj->map()->IsMap());
2168   }
2169   // OLD_TO_OLD slots are always removed after updating.
2170   return REMOVE_SLOT;
2171 }
2172 
2173 template <AccessMode access_mode>
2174 static inline SlotCallbackResult UpdateSlot(MaybeObject** slot) {
2175   MaybeObject* obj = base::AsAtomicPointer::Relaxed_Load(slot);
2176   HeapObject* heap_obj;
2177   if (obj->ToWeakHeapObject(&heap_obj)) {
2178     UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
2179   } else if (obj->ToStrongHeapObject(&heap_obj)) {
2180     return UpdateSlot<access_mode>(slot, obj, heap_obj,
2181                                    HeapObjectReferenceType::STRONG);
2182   }
2183   return REMOVE_SLOT;
2184 }
2185 
2186 template <AccessMode access_mode>
2187 static inline SlotCallbackResult UpdateStrongSlot(MaybeObject** maybe_slot) {
2188   DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrongHeapObject());
2189   Object** slot = reinterpret_cast<Object**>(maybe_slot);
2190   Object* obj = base::AsAtomicPointer::Relaxed_Load(slot);
2191   if (obj->IsHeapObject()) {
2192     HeapObject* heap_obj = HeapObject::cast(obj);
2193     return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
2194                                    heap_obj, HeapObjectReferenceType::STRONG);
2195   }
2196   return REMOVE_SLOT;
2197 }
2198 
2199 // Visitor for updating root pointers and to-space pointers.
2200 // It does not expect to encounter pointers to dead objects.
2201 // TODO(ulan): Remove code object specific functions. This visitor
2202 // never visits code objects.
2203 class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
2204  public:
2205   explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2206 
2207   void VisitPointer(HeapObject* host, Object** p) override {
2208     UpdateStrongSlotInternal(p);
2209   }
2210 
2211   void VisitPointer(HeapObject* host, MaybeObject** p) override {
2212     UpdateSlotInternal(p);
2213   }
2214 
2215   void VisitPointers(HeapObject* host, Object** start, Object** end) override {
2216     for (Object** p = start; p < end; p++) {
2217       UpdateStrongSlotInternal(p);
2218     }
2219   }
2220 
2221   void VisitPointers(HeapObject* host, MaybeObject** start,
2222                      MaybeObject** end) final {
2223     for (MaybeObject** p = start; p < end; p++) {
2224       UpdateSlotInternal(p);
2225     }
2226   }
2227 
2228   void VisitRootPointer(Root root, const char* description,
2229                         Object** p) override {
2230     UpdateStrongSlotInternal(p);
2231   }
2232 
2233   void VisitRootPointers(Root root, const char* description, Object** start,
2234                          Object** end) override {
2235     for (Object** p = start; p < end; p++) UpdateStrongSlotInternal(p);
2236   }
2237 
2238   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) override {
2239     UpdateTypedSlotHelper::UpdateEmbeddedPointer(
2240         heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
2241   }
2242 
2243   void VisitCodeTarget(Code* host, RelocInfo* rinfo) override {
2244     UpdateTypedSlotHelper::UpdateCodeTarget(
2245         rinfo, UpdateStrongMaybeObjectSlotInternal);
2246   }
2247 
2248  private:
2249   static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
2250       MaybeObject** slot) {
2251     DCHECK(!(*slot)->IsWeakHeapObject());
2252     DCHECK(!(*slot)->IsClearedWeakHeapObject());
2253     return UpdateStrongSlotInternal(reinterpret_cast<Object**>(slot));
2254   }
2255 
2256   static inline SlotCallbackResult UpdateStrongSlotInternal(Object** slot) {
2257     DCHECK(!HasWeakHeapObjectTag(*slot));
2258     return UpdateStrongSlot<AccessMode::NON_ATOMIC>(
2259         reinterpret_cast<MaybeObject**>(slot));
2260   }
2261 
2262   static inline SlotCallbackResult UpdateSlotInternal(MaybeObject** slot) {
2263     return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
2264   }
2265 
2266   Heap* heap_;
2267 };
2268 
2269 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2270                                                          Object** p) {
2271   MapWord map_word = HeapObject::cast(*p)->map_word();
2272 
2273   if (map_word.IsForwardingAddress()) {
2274     String* new_string = String::cast(map_word.ToForwardingAddress());
2275 
2276     if (new_string->IsExternalString()) {
2277       heap->ProcessMovedExternalString(
2278           Page::FromAddress(reinterpret_cast<Address>(*p)),
2279           Page::FromHeapObject(new_string), ExternalString::cast(new_string));
2280     }
2281     return new_string;
2282   }
2283 
2284   return String::cast(*p);
2285 }
2286 
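// Evacuation prologue: collect the allocatable new-space pages, flip the
// semispaces, and take ownership of the previously selected old-space
// evacuation candidates.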
2287 void MarkCompactCollector::EvacuatePrologue() {
2288   // New space.
2289   NewSpace* new_space = heap()->new_space();
2290   // Append the list of new space pages to be processed.
2291   for (Page* p :
2292        PageRange(new_space->first_allocatable_address(), new_space->top())) {
2293     new_space_evacuation_pages_.push_back(p);
2294   }
2295   new_space->Flip();
2296   new_space->ResetLinearAllocationArea();
2297 
2298   // Old space.
2299   DCHECK(old_space_evacuation_pages_.empty());
2300   old_space_evacuation_pages_ = std::move(evacuation_candidates_);
2301   evacuation_candidates_.clear();
2302   DCHECK(evacuation_candidates_.empty());
2303 }
2304 
2305 void MarkCompactCollector::EvacuateEpilogue() {
2306   aborted_evacuation_candidates_.clear();
2307   // New space.
2308   heap()->new_space()->set_age_mark(heap()->new_space()->top());
2309   // Deallocate unmarked large objects.
2310   heap()->lo_space()->FreeUnmarkedObjects();
2311   // Old space. Deallocate evacuated candidate pages.
2312   ReleaseEvacuationCandidates();
2313   // Give pages that are queued to be freed back to the OS.
2314   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2315 #ifdef DEBUG
2316   // Old-to-old slot sets must be empty after evacuation.
2317   for (Page* p : *heap()->old_space()) {
2318     DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2319     DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2320     DCHECK_NULL(p->invalidated_slots());
2321   }
2322 #endif
2323 }
2324 
2325 class Evacuator : public Malloced {
2326  public:
2327   enum EvacuationMode {
2328     kObjectsNewToOld,
2329     kPageNewToOld,
2330     kObjectsOldToOld,
2331     kPageNewToNew,
2332   };
2333 
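  // Derives the evacuation mode from page flags. The promotion flags are
  // checked before the generic InNewSpace() test because a page promoted
  // within new space would otherwise be misclassified as kObjectsNewToOld.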
2334   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2335     // Note: The order of checks is important in this function.
2336     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2337       return kPageNewToOld;
2338     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2339       return kPageNewToNew;
2340     if (chunk->InNewSpace()) return kObjectsNewToOld;
2341     return kObjectsOldToOld;
2342   }
2343 
2344   // NewSpacePages with more live bytes than this threshold qualify for fast
2345   // evacuation.
2346   static int PageEvacuationThreshold() {
2347     if (FLAG_page_promotion)
2348       return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
2349     return Page::kAllocatableMemory + kPointerSize;
2350   }
2351 
2352   Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
2353       : heap_(heap),
2354         local_allocator_(heap_),
2355         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2356         new_space_visitor_(heap_, &local_allocator_, record_visitor,
2357                            &local_pretenuring_feedback_),
2358         new_to_new_page_visitor_(heap_, record_visitor,
2359                                  &local_pretenuring_feedback_),
2360         new_to_old_page_visitor_(heap_, record_visitor,
2361                                  &local_pretenuring_feedback_),
2362 
2363         old_space_visitor_(heap_, &local_allocator_, record_visitor),
2364         duration_(0.0),
2365         bytes_compacted_(0) {}
2366 
2367   virtual ~Evacuator() {}
2368 
2369   void EvacuatePage(Page* page);
2370 
2371   void AddObserver(MigrationObserver* observer) {
2372     new_space_visitor_.AddObserver(observer);
2373     old_space_visitor_.AddObserver(observer);
2374   }
2375 
2376   // Merge back locally cached info sequentially. Note that this method needs
2377   // to be called from the main thread.
2378   inline void Finalize();
2379 
2380   virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
2381 
2382  protected:
2383   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2384 
2385   // |saved_live_bytes| returns the live bytes of the page that was processed.
2386   virtual void RawEvacuatePage(Page* page, intptr_t* saved_live_bytes) = 0;
2387 
2388   inline Heap* heap() { return heap_; }
2389 
2390   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2391     duration_ += duration;
2392     bytes_compacted_ += bytes_compacted;
2393   }
2394 
2395   Heap* heap_;
2396 
2397   // Locally cached collector data.
2398   LocalAllocator local_allocator_;
2399   Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
2400 
2401   // Visitors for the corresponding spaces.
2402   EvacuateNewSpaceVisitor new_space_visitor_;
2403   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2404       new_to_new_page_visitor_;
2405   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2406       new_to_old_page_visitor_;
2407   EvacuateOldSpaceVisitor old_space_visitor_;
2408 
2409   // Bookkeeping info.
2410   double duration_;
2411   intptr_t bytes_compacted_;
2412 };
2413 
2414 void Evacuator::EvacuatePage(Page* page) {
2415   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
2416   DCHECK(page->SweepingDone());
2417   intptr_t saved_live_bytes = 0;
2418   double evacuation_time = 0.0;
2419   {
2420     AlwaysAllocateScope always_allocate(heap()->isolate());
2421     TimedScope timed_scope(&evacuation_time);
2422     RawEvacuatePage(page, &saved_live_bytes);
2423   }
2424   ReportCompactionProgress(evacuation_time, saved_live_bytes);
2425   if (FLAG_trace_evacuation) {
2426     PrintIsolate(
2427         heap()->isolate(),
2428         "evacuation[%p]: page=%p new_space=%d "
2429         "page_evacuation=%d executable=%d contains_age_mark=%d "
2430         "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
2431         static_cast<void*>(this), static_cast<void*>(page), page->InNewSpace(),
2432         page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
2433             page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
2434         page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
2435         page->Contains(heap()->new_space()->age_mark()), saved_live_bytes,
2436         evacuation_time, page->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2437   }
2438 }
2439 
2440 void Evacuator::Finalize() {
2441   local_allocator_.Finalize();
2442   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
2443   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
2444                                        new_to_old_page_visitor_.moved_bytes());
2445   heap()->IncrementSemiSpaceCopiedObjectSize(
2446       new_space_visitor_.semispace_copied_size() +
2447       new_to_new_page_visitor_.moved_bytes());
2448   heap()->IncrementYoungSurvivorsCounter(
2449       new_space_visitor_.promoted_size() +
2450       new_space_visitor_.semispace_copied_size() +
2451       new_to_old_page_visitor_.moved_bytes() +
2452       new_to_new_page_visitor_.moved_bytes());
2453   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
2454 }
2455 
2456 class FullEvacuator : public Evacuator {
2457  public:
2458   FullEvacuator(MarkCompactCollector* collector,
2459                 RecordMigratedSlotVisitor* record_visitor)
2460       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
2461 
2462   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
2463     return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
2464   }
2465 
2466  protected:
2467   void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
2468 
2469   MarkCompactCollector* collector_;
2470 };
2471 
2472 void FullEvacuator::RawEvacuatePage(Page* page, intptr_t* live_bytes) {
2473   const EvacuationMode evacuation_mode = ComputeEvacuationMode(page);
2474   TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2475                "FullEvacuator::RawEvacuatePage", "evacuation_mode",
2476                evacuation_mode);
2477   MarkCompactCollector::NonAtomicMarkingState* marking_state =
2478       collector_->non_atomic_marking_state();
2479   *live_bytes = marking_state->live_bytes(page);
2480   HeapObject* failed_object = nullptr;
2481   switch (evacuation_mode) {
2482     case kObjectsNewToOld:
2483       LiveObjectVisitor::VisitBlackObjectsNoFail(
2484           page, marking_state, &new_space_visitor_,
2485           LiveObjectVisitor::kClearMarkbits);
2486       // ArrayBufferTracker will be updated during pointers updating.
2487       break;
2488     case kPageNewToOld:
2489       LiveObjectVisitor::VisitBlackObjectsNoFail(
2490           page, marking_state, &new_to_old_page_visitor_,
2491           LiveObjectVisitor::kKeepMarking);
2492       new_to_old_page_visitor_.account_moved_bytes(
2493           marking_state->live_bytes(page));
2494       // ArrayBufferTracker will be updated during sweeping.
2495       break;
2496     case kPageNewToNew:
2497       LiveObjectVisitor::VisitBlackObjectsNoFail(
2498           page, marking_state, &new_to_new_page_visitor_,
2499           LiveObjectVisitor::kKeepMarking);
2500       new_to_new_page_visitor_.account_moved_bytes(
2501           marking_state->live_bytes(page));
2502       // ArrayBufferTracker will be updated during sweeping.
2503       break;
2504     case kObjectsOldToOld: {
2505       const bool success = LiveObjectVisitor::VisitBlackObjects(
2506           page, marking_state, &old_space_visitor_,
2507           LiveObjectVisitor::kClearMarkbits, &failed_object);
2508       if (!success) {
2509         // Aborted compaction page. Actual processing happens on the main
2510         // thread for simplicity reasons.
2511         collector_->ReportAbortedEvacuationCandidate(failed_object, page);
2512       } else {
2513         // ArrayBufferTracker will be updated during pointers updating.
2514       }
2515       break;
2516     }
2517   }
2518 }
2519 
2520 class PageEvacuationItem : public ItemParallelJob::Item {
2521  public:
2522   explicit PageEvacuationItem(Page* page) : page_(page) {}
2523   virtual ~PageEvacuationItem() {}
2524   Page* page() const { return page_; }
2525 
2526  private:
2527   Page* page_;
2528 };
2529 
2530 class PageEvacuationTask : public ItemParallelJob::Task {
2531  public:
2532   PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
2533       : ItemParallelJob::Task(isolate),
2534         evacuator_(evacuator),
2535         tracer_(isolate->heap()->tracer()) {}
2536 
2537   void RunInParallel() override {
2538     TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
2539     PageEvacuationItem* item = nullptr;
2540     while ((item = GetItem<PageEvacuationItem>()) != nullptr) {
2541       evacuator_->EvacuatePage(item->page());
2542       item->MarkFinished();
2543     }
2544   };
2545 
2546  private:
2547   Evacuator* evacuator_;
2548   GCTracer* tracer_;
2549 };
2550 
2551 template <class Evacuator, class Collector>
2552 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
2553     Collector* collector, ItemParallelJob* job,
2554     RecordMigratedSlotVisitor* record_visitor,
2555     MigrationObserver* migration_observer, const intptr_t live_bytes) {
2556   // Used for trace summary.
2557   double compaction_speed = 0;
2558   if (FLAG_trace_evacuation) {
2559     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
2560   }
2561 
2562   const bool profiling =
2563       heap()->isolate()->is_profiling() ||
2564       heap()->isolate()->logger()->is_listening_to_code_events() ||
2565       heap()->isolate()->heap_profiler()->is_tracking_object_moves() ||
2566       heap()->has_heap_object_allocation_tracker();
2567   ProfilingMigrationObserver profiling_observer(heap());
2568 
2569   const int wanted_num_tasks =
2570       NumberOfParallelCompactionTasks(job->NumberOfItems());
2571   Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
2572   for (int i = 0; i < wanted_num_tasks; i++) {
2573     evacuators[i] = new Evacuator(collector, record_visitor);
2574     if (profiling) evacuators[i]->AddObserver(&profiling_observer);
2575     if (migration_observer != nullptr)
2576       evacuators[i]->AddObserver(migration_observer);
2577     job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
2578   }
2579   job->Run(isolate()->async_counters());
2580   for (int i = 0; i < wanted_num_tasks; i++) {
2581     evacuators[i]->Finalize();
2582     delete evacuators[i];
2583   }
2584   delete[] evacuators;
2585 
2586   if (FLAG_trace_evacuation) {
2587     PrintIsolate(isolate(),
2588                  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
2589                  "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
2590                  " compaction_speed=%.f\n",
2591                  isolate()->time_millis_since_init(),
2592                  FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
2593                  wanted_num_tasks, job->NumberOfTasks(),
2594                  V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
2595                  live_bytes, compaction_speed);
2596   }
2597 }
2598 
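// A new-space page is moved wholesale (rather than evacuating its objects)
// only when the heap is not trying to shrink, the page may be evacuated, its
// live bytes exceed the page-promotion threshold, it does not contain the age
// mark, and the old generation can still grow by that amount.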
2599 bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
2600   const bool reduce_memory = heap()->ShouldReduceMemory();
2601   const Address age_mark = heap()->new_space()->age_mark();
2602   return !reduce_memory && !p->NeverEvacuate() &&
2603          (live_bytes > Evacuator::PageEvacuationThreshold()) &&
2604          !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
2605 }
2606 
2607 void MarkCompactCollector::EvacuatePagesInParallel() {
2608   ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
2609                                  &page_parallel_job_semaphore_);
2610   intptr_t live_bytes = 0;
2611 
2612   for (Page* page : old_space_evacuation_pages_) {
2613     live_bytes += non_atomic_marking_state()->live_bytes(page);
2614     evacuation_job.AddItem(new PageEvacuationItem(page));
2615   }
2616 
2617   for (Page* page : new_space_evacuation_pages_) {
2618     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
2619     if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
2620     live_bytes += live_bytes_on_page;
2621     if (ShouldMovePage(page, live_bytes_on_page)) {
2622       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
2623         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
2624         DCHECK_EQ(heap()->old_space(), page->owner());
2625         // The move added page->allocated_bytes to the old space, but we are
2626         // going to sweep the page and add page->live_byte_count.
2627         heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
2628                                                     page);
2629       } else {
2630         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
2631       }
2632     }
2633     evacuation_job.AddItem(new PageEvacuationItem(page));
2634   }
2635   if (evacuation_job.NumberOfItems() == 0) return;
2636 
2637   RecordMigratedSlotVisitor record_visitor(this);
2638   CreateAndExecuteEvacuationTasks<FullEvacuator>(
2639       this, &evacuation_job, &record_visitor, nullptr, live_bytes);
2640   PostProcessEvacuationCandidates();
2641 }
2642 
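// Weak list retainer used after evacuation: objects that have been moved are
// replaced by their forwarding address; everything else is retained as-is.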
2643 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2644  public:
2645   virtual Object* RetainAs(Object* object) {
2646     if (object->IsHeapObject()) {
2647       HeapObject* heap_object = HeapObject::cast(object);
2648       MapWord map_word = heap_object->map_word();
2649       if (map_word.IsForwardingAddress()) {
2650         return map_word.ToForwardingAddress();
2651       }
2652     }
2653     return object;
2654   }
2655 };
2656 
2657 // Return true if the given code is deoptimized or will be deoptimized.
2658 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
2659   return code->is_optimized_code() && code->marked_for_deoptimization();
2660 }
2661 
2662 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
2663   EvacuateRecordOnlyVisitor visitor(heap());
2664   LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
2665                                              &visitor,
2666                                              LiveObjectVisitor::kKeepMarking);
2667 }
2668 
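// Visits all black objects on the chunk using the marking bitmap. If the
// visitor rejects an object and kClearMarkbits was requested, the markbits
// from the start of the area up to the failed object are cleared and the
// object is reported back via |failed_object|; if every object is visited
// successfully, the chunk's liveness information is cleared as a whole.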
2669 template <class Visitor, typename MarkingState>
2670 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
2671                                           MarkingState* marking_state,
2672                                           Visitor* visitor,
2673                                           IterationMode iteration_mode,
2674                                           HeapObject** failed_object) {
2675   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2676                "LiveObjectVisitor::VisitBlackObjects");
2677   for (auto object_and_size :
2678        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2679     HeapObject* const object = object_and_size.first;
2680     if (!visitor->Visit(object, object_and_size.second)) {
2681       if (iteration_mode == kClearMarkbits) {
2682         marking_state->bitmap(chunk)->ClearRange(
2683             chunk->AddressToMarkbitIndex(chunk->area_start()),
2684             chunk->AddressToMarkbitIndex(object->address()));
2685         *failed_object = object;
2686       }
2687       return false;
2688     }
2689   }
2690   if (iteration_mode == kClearMarkbits) {
2691     marking_state->ClearLiveness(chunk);
2692   }
2693   return true;
2694 }
2695 
2696 template <class Visitor, typename MarkingState>
2697 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
2698                                                 MarkingState* marking_state,
2699                                                 Visitor* visitor,
2700                                                 IterationMode iteration_mode) {
2701   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2702                "LiveObjectVisitor::VisitBlackObjectsNoFail");
2703   for (auto object_and_size :
2704        LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2705     HeapObject* const object = object_and_size.first;
2706     DCHECK(marking_state->IsBlack(object));
2707     const bool success = visitor->Visit(object, object_and_size.second);
2708     USE(success);
2709     DCHECK(success);
2710   }
2711   if (iteration_mode == kClearMarkbits) {
2712     marking_state->ClearLiveness(chunk);
2713   }
2714 }
2715 
2716 template <class Visitor, typename MarkingState>
2717 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
2718                                                MarkingState* marking_state,
2719                                                Visitor* visitor,
2720                                                IterationMode iteration_mode) {
2721   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2722                "LiveObjectVisitor::VisitGreyObjectsNoFail");
2723   for (auto object_and_size :
2724        LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
2725     HeapObject* const object = object_and_size.first;
2726     DCHECK(marking_state->IsGrey(object));
2727     const bool success = visitor->Visit(object, object_and_size.second);
2728     USE(success);
2729     DCHECK(success);
2730   }
2731   if (iteration_mode == kClearMarkbits) {
2732     marking_state->ClearLiveness(chunk);
2733   }
2734 }
2735 
2736 template <typename MarkingState>
2737 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
2738                                            MarkingState* marking_state) {
2739   int new_live_size = 0;
2740   for (auto object_and_size :
2741        LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
2742     new_live_size += object_and_size.second;
2743   }
2744   marking_state->SetLiveBytes(chunk, new_live_size);
2745 }
2746 
2747 void MarkCompactCollector::Evacuate() {
2748   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
2749   base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
2750 
2751   {
2752     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
2753     EvacuatePrologue();
2754   }
2755 
2756   {
2757     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
2758     EvacuationScope evacuation_scope(this);
2759     EvacuatePagesInParallel();
2760   }
2761 
2762   UpdatePointersAfterEvacuation();
2763 
2764   {
2765     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
2766     if (!heap()->new_space()->Rebalance()) {
2767       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
2768     }
2769   }
2770 
2771   // Give pages that are queued to be freed back to the OS. Note that filtering
2772   // slots only handles old space (for unboxed doubles), and thus map space can
2773   // still contain stale pointers. We only free the chunks after pointer updates
2774   // to still have access to page headers.
2775   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2776 
2777   {
2778     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
2779 
2780     for (Page* p : new_space_evacuation_pages_) {
2781       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
2782         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
2783         sweeper()->AddPageForIterability(p);
2784       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
2785         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
2786         DCHECK_EQ(OLD_SPACE, p->owner()->identity());
2787         sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
2788       }
2789     }
2790     new_space_evacuation_pages_.clear();
2791 
2792     for (Page* p : old_space_evacuation_pages_) {
2793       // Important: the skip list should be cleared only after roots were
2794       // updated, because root iteration traverses the stack and might find
2795       // code objects from a non-updated pc pointing into an evacuation candidate.
2796       SkipList* list = p->skip_list();
2797       if (list != nullptr) list->Clear();
2798       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
2799         sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
2800         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
2801       }
2802     }
2803   }
2804 
2805   {
2806     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
2807     EvacuateEpilogue();
2808   }
2809 
2810 #ifdef VERIFY_HEAP
2811   if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
2812     FullEvacuationVerifier verifier(heap());
2813     verifier.Run();
2814   }
2815 #endif
2816 }
2817 
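// Work item interface for the parallel pointer-updating phase; items are
// drained by PointersUpdatingTask below.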
2818 class UpdatingItem : public ItemParallelJob::Item {
2819  public:
2820   virtual ~UpdatingItem() {}
2821   virtual void Process() = 0;
2822 };
2823 
2824 class PointersUpdatingTask : public ItemParallelJob::Task {
2825  public:
2826   explicit PointersUpdatingTask(Isolate* isolate,
2827                                 GCTracer::BackgroundScope::ScopeId scope)
2828       : ItemParallelJob::Task(isolate),
2829         tracer_(isolate->heap()->tracer()),
2830         scope_(scope) {}
2831 
2832   void RunInParallel() override {
2833     TRACE_BACKGROUND_GC(tracer_, scope_);
2834     UpdatingItem* item = nullptr;
2835     while ((item = GetItem<UpdatingItem>()) != nullptr) {
2836       item->Process();
2837       item->MarkFinished();
2838     }
2839   }
2840 
2841  private:
2842   GCTracer* tracer_;
2843   GCTracer::BackgroundScope::ScopeId scope_;
2844 };
2845 
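// Updates pointers inside a to-space page. Pages that were promoted within
// new space still contain dead objects and are therefore walked via the
// marking bitmap; all other pages are iterated linearly object by object.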
2846 template <typename MarkingState>
2847 class ToSpaceUpdatingItem : public UpdatingItem {
2848  public:
2849   explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
2850                                MarkingState* marking_state)
2851       : chunk_(chunk),
2852         start_(start),
2853         end_(end),
2854         marking_state_(marking_state) {}
2855   virtual ~ToSpaceUpdatingItem() {}
2856 
2857   void Process() override {
2858     if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
2859       // New->new promoted pages contain garbage so they require iteration using
2860       // markbits.
2861       ProcessVisitLive();
2862     } else {
2863       ProcessVisitAll();
2864     }
2865   }
2866 
2867  private:
2868   void ProcessVisitAll() {
2869     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2870                  "ToSpaceUpdatingItem::ProcessVisitAll");
2871     PointersUpdatingVisitor visitor(chunk_->heap());
2872     for (Address cur = start_; cur < end_;) {
2873       HeapObject* object = HeapObject::FromAddress(cur);
2874       Map* map = object->map();
2875       int size = object->SizeFromMap(map);
2876       object->IterateBodyFast(map, size, &visitor);
2877       cur += size;
2878     }
2879   }
2880 
2881   void ProcessVisitLive() {
2882     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2883                  "ToSpaceUpdatingItem::ProcessVisitLive");
2884     // For young generation evacuations we want to visit grey objects; for
2885     // full MC we need to visit black objects.
2886     PointersUpdatingVisitor visitor(chunk_->heap());
2887     for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
2888              chunk_, marking_state_->bitmap(chunk_))) {
2889       object_and_size.first->IterateBodyFast(&visitor);
2890     }
2891   }
2892 
2893   MemoryChunk* chunk_;
2894   Address start_;
2895   Address end_;
2896   MarkingState* marking_state_;
2897 };
2898 
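// Updates the recorded slots of a single chunk. Old-to-new slots (untyped
// and typed) are always processed; old-to-old and invalidated slots are only
// touched when the updating mode is RememberedSetUpdatingMode::ALL. Slots
// that no longer point to live new space objects are removed along the way.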
2899 template <typename MarkingState>
2900 class RememberedSetUpdatingItem : public UpdatingItem {
2901  public:
2902   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
2903                                      MemoryChunk* chunk,
2904                                      RememberedSetUpdatingMode updating_mode)
2905       : heap_(heap),
2906         marking_state_(marking_state),
2907         chunk_(chunk),
2908         updating_mode_(updating_mode) {}
2909   virtual ~RememberedSetUpdatingItem() {}
2910 
2911   void Process() override {
2912     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2913                  "RememberedSetUpdatingItem::Process");
2914     base::LockGuard<base::Mutex> guard(chunk_->mutex());
2915     CodePageMemoryModificationScope memory_modification_scope(chunk_);
2916     UpdateUntypedPointers();
2917     UpdateTypedPointers();
2918   }
2919 
2920  private:
2921   inline SlotCallbackResult CheckAndUpdateOldToNewSlot(Address slot_address) {
2922     MaybeObject** slot = reinterpret_cast<MaybeObject**>(slot_address);
2923     HeapObject* heap_object;
2924     if (!(*slot)->ToStrongOrWeakHeapObject(&heap_object)) {
2925       return REMOVE_SLOT;
2926     }
2927     if (Heap::InFromSpace(heap_object)) {
2928       MapWord map_word = heap_object->map_word();
2929       if (map_word.IsForwardingAddress()) {
2930         HeapObjectReference::Update(
2931             reinterpret_cast<HeapObjectReference**>(slot),
2932             map_word.ToForwardingAddress());
2933       }
2934       bool success = (*slot)->ToStrongOrWeakHeapObject(&heap_object);
2935       USE(success);
2936       DCHECK(success);
2937       // If the object was in from space before executing the callback and
2938       // is in to space afterwards, the object is still live.
2939       // Unfortunately, we do not know about the slot. It could be in a
2940       // just freed free space object.
2941       if (Heap::InToSpace(heap_object)) {
2942         return KEEP_SLOT;
2943       }
2944     } else if (Heap::InToSpace(heap_object)) {
2945       // Slots can point to "to" space if the page has been moved, or if the
2946       // slot has been recorded multiple times in the remembered set, or
2947       // if the slot was already updated during old->old updating.
2948       // In case the page has been moved, check markbits to determine liveness
2949       // of the slot. In the other case, the slot can just be kept.
2950       if (Page::FromAddress(heap_object->address())
2951               ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
2952         // IsBlackOrGrey is required because objects are marked as grey for
2953         // the young generation collector while they are black for the full
2954         // MC.
2955         if (marking_state_->IsBlackOrGrey(heap_object)) {
2956           return KEEP_SLOT;
2957         } else {
2958           return REMOVE_SLOT;
2959         }
2960       }
2961       return KEEP_SLOT;
2962     } else {
2963       DCHECK(!Heap::InNewSpace(heap_object));
2964     }
2965     return REMOVE_SLOT;
2966   }
2967 
2968   void UpdateUntypedPointers() {
2969     if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
2970       RememberedSet<OLD_TO_NEW>::Iterate(
2971           chunk_,
2972           [this](Address slot) { return CheckAndUpdateOldToNewSlot(slot); },
2973           SlotSet::PREFREE_EMPTY_BUCKETS);
2974     }
2975     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
2976         (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
2977       InvalidatedSlotsFilter filter(chunk_);
2978       RememberedSet<OLD_TO_OLD>::Iterate(
2979           chunk_,
2980           [&filter](Address slot) {
2981             if (!filter.IsValid(slot)) return REMOVE_SLOT;
2982             return UpdateSlot<AccessMode::NON_ATOMIC>(
2983                 reinterpret_cast<MaybeObject**>(slot));
2984           },
2985           SlotSet::PREFREE_EMPTY_BUCKETS);
2986     }
2987     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
2988         chunk_->invalidated_slots() != nullptr) {
2989 #ifdef DEBUG
2990       for (auto object_size : *chunk_->invalidated_slots()) {
2991         HeapObject* object = object_size.first;
2992         int size = object_size.second;
2993         DCHECK_LE(object->SizeFromMap(object->map()), size);
2994       }
2995 #endif
2996       // The invalidated slots are not needed after old-to-old slots were
2997       // processed.
2998       chunk_->ReleaseInvalidatedSlots();
2999     }
3000   }
3001 
3002   void UpdateTypedPointers() {
3003     if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
3004         nullptr) {
3005       CHECK_NE(chunk_->owner(), heap_->map_space());
3006       const auto check_and_update_old_to_new_slot_fn =
3007           [this](MaybeObject** slot) {
3008             return CheckAndUpdateOldToNewSlot(reinterpret_cast<Address>(slot));
3009           };
3010       RememberedSet<OLD_TO_NEW>::IterateTyped(
3011           chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
3012             return UpdateTypedSlotHelper::UpdateTypedSlot(
3013                 heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
3014           });
3015     }
3016     if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3017         (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
3018          nullptr)) {
3019       CHECK_NE(chunk_->owner(), heap_->map_space());
3020       RememberedSet<OLD_TO_OLD>::IterateTyped(
3021           chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
3022             // Using UpdateStrongSlot is OK here, because there are no weak
3023             // typed slots.
3024             return UpdateTypedSlotHelper::UpdateTypedSlot(
3025                 heap_, slot_type, slot,
3026                 UpdateStrongSlot<AccessMode::NON_ATOMIC>);
3027           });
3028     }
3029   }
3030 
3031   Heap* heap_;
3032   MarkingState* marking_state_;
3033   MemoryChunk* chunk_;
3034   RememberedSetUpdatingMode updating_mode_;
3035 };
3036 
3037 UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
3038     MemoryChunk* chunk, Address start, Address end) {
3039   return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
3040       chunk, start, end, non_atomic_marking_state());
3041 }
3042 
3043 UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
3044     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
3045   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
3046       heap(), non_atomic_marking_state(), chunk, updating_mode);
3047 }
3048 
3049 class GlobalHandlesUpdatingItem : public UpdatingItem {
3050  public:
3051   GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
3052                             size_t start, size_t end)
3053       : heap_(heap),
3054         global_handles_(global_handles),
3055         start_(start),
3056         end_(end) {}
3057   virtual ~GlobalHandlesUpdatingItem() {}
3058 
3059   void Process() override {
3060     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3061                  "GlobalHandlesUpdatingItem::Process");
3062     PointersUpdatingVisitor updating_visitor(heap_);
3063     global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
3064   }
3065 
3066  private:
3067   Heap* heap_;
3068   GlobalHandles* global_handles_;
3069   size_t start_;
3070   size_t end_;
3071 };
3072 
3073 // Update array buffers on a page that has been evacuated by copying objects.
3074 // Target page exclusivity in old space is guaranteed by the fact that
3075 // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
3076 // free list items of a given page. For new space the tracker will update
3077 // using a lock.
3078 class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
3079  public:
3080   enum EvacuationState { kRegular, kAborted };
3081 
3082   explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
3083       : page_(page), state_(state) {}
3084   virtual ~ArrayBufferTrackerUpdatingItem() {}
3085 
3086   void Process() override {
3087     TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3088                  "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
3089                  state_);
3090     switch (state_) {
3091       case EvacuationState::kRegular:
3092         ArrayBufferTracker::ProcessBuffers(
3093             page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3094         break;
3095       case EvacuationState::kAborted:
3096         ArrayBufferTracker::ProcessBuffers(
3097             page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3098         break;
3099     }
3100   }
3101 
3102  private:
3103   Page* const page_;
3104   const EvacuationState state_;
3105 };
3106 
3107 int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
3108     ItemParallelJob* job) {
3109   // Seed to space pages.
3110   const Address space_start = heap()->new_space()->first_allocatable_address();
3111   const Address space_end = heap()->new_space()->top();
3112   int pages = 0;
3113   for (Page* page : PageRange(space_start, space_end)) {
3114     Address start =
3115         page->Contains(space_start) ? space_start : page->area_start();
3116     Address end = page->Contains(space_end) ? space_end : page->area_end();
3117     job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
3118     pages++;
3119   }
3120   if (pages == 0) return 0;
3121   return NumberOfParallelToSpacePointerUpdateTasks(pages);
3122 }
3123 
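// Seeds one RememberedSetUpdatingItem per chunk of |space| that carries any
// recorded, typed, or invalidated slots; chunks with only old-to-old slots
// are skipped unless |mode| is ALL. Returns the number of items added, e.g.:
//   remembered_set_pages += CollectRememberedSetUpdatingItems(
//       &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);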
3124 template <typename IterateableSpace>
3125 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
3126     ItemParallelJob* job, IterateableSpace* space,
3127     RememberedSetUpdatingMode mode) {
3128   int pages = 0;
3129   for (MemoryChunk* chunk : *space) {
3130     const bool contains_old_to_old_slots =
3131         chunk->slot_set<OLD_TO_OLD>() != nullptr ||
3132         chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
3133     const bool contains_old_to_new_slots =
3134         chunk->slot_set<OLD_TO_NEW>() != nullptr ||
3135         chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
3136     const bool contains_invalidated_slots =
3137         chunk->invalidated_slots() != nullptr;
3138     if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
3139         !contains_invalidated_slots)
3140       continue;
3141     if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
3142         contains_invalidated_slots) {
3143       job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
3144       pages++;
3145     }
3146   }
3147   return pages;
3148 }
3149 
3150 int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
3151     ItemParallelJob* job) {
3152   int pages = 0;
3153   for (Page* p : new_space_evacuation_pages_) {
3154     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
3155       if (p->local_tracker() == nullptr) continue;
3156 
3157       pages++;
3158       job->AddItem(new ArrayBufferTrackerUpdatingItem(
3159           p, ArrayBufferTrackerUpdatingItem::kRegular));
3160     }
3161   }
3162   return pages;
3163 }
3164 
3165 int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
3166     ItemParallelJob* job) {
3167   int pages = 0;
3168   for (Page* p : old_space_evacuation_pages_) {
3169     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
3170         p->IsEvacuationCandidate()) {
3171       if (p->local_tracker() == nullptr) continue;
3172 
3173       pages++;
3174       job->AddItem(new ArrayBufferTrackerUpdatingItem(
3175           p, ArrayBufferTrackerUpdatingItem::kRegular));
3176     }
3177   }
3178   for (auto object_and_page : aborted_evacuation_candidates_) {
3179     Page* p = object_and_page.second;
3180     if (p->local_tracker() == nullptr) continue;
3181 
3182     pages++;
3183     job->AddItem(new ArrayBufferTrackerUpdatingItem(
3184         p, ArrayBufferTrackerUpdatingItem::kAborted));
3185   }
3186   return pages;
3187 }
3188 
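// Pointer updating after evacuation proceeds in stages: roots are visited
// first, then the remembered sets of old, code and large object space are
// processed in parallel together with to-space pages, then map space and the
// array buffer trackers are handled in a separate pass (to avoid racing on
// the Map->LayoutDescriptor edge), and finally the external string table and
// weak lists are fixed up.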
3189 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3190   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3191 
3192   PointersUpdatingVisitor updating_visitor(heap());
3193 
3194   {
3195     TRACE_GC(heap()->tracer(),
3196              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3197     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3198   }
3199 
3200   {
3201     TRACE_GC(heap()->tracer(),
3202              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
3203     ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3204                                  &page_parallel_job_semaphore_);
3205 
3206     int remembered_set_pages = 0;
3207     remembered_set_pages += CollectRememberedSetUpdatingItems(
3208         &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
3209     remembered_set_pages += CollectRememberedSetUpdatingItems(
3210         &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
3211     remembered_set_pages += CollectRememberedSetUpdatingItems(
3212         &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
3213     const int remembered_set_tasks =
3214         remembered_set_pages == 0
3215             ? 0
3216             : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3217                                                  old_to_new_slots_);
3218     const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3219     const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3220     for (int i = 0; i < num_tasks; i++) {
3221       updating_job.AddTask(new PointersUpdatingTask(
3222           isolate(),
3223           GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3224     }
3225     updating_job.Run(isolate()->async_counters());
3226   }
3227 
3228   {
3229     // - Update pointers in map space in a separate phase to avoid data races
3230     //   with Map->LayoutDescriptor edge.
3231     // - Update array buffer trackers in the second phase to have access to
3232     //   byte length which is potentially a HeapNumber.
3233     TRACE_GC(heap()->tracer(),
3234              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
3235     ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3236                                  &page_parallel_job_semaphore_);
3237 
3238     int array_buffer_pages = 0;
3239     array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3240     array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
3241 
3242     int remembered_set_pages = 0;
3243     remembered_set_pages += CollectRememberedSetUpdatingItems(
3244         &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
3245     const int remembered_set_tasks =
3246         remembered_set_pages == 0
3247             ? 0
3248             : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3249                                                  old_to_new_slots_);
3250     const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
3251     if (num_tasks > 0) {
3252       for (int i = 0; i < num_tasks; i++) {
3253         updating_job.AddTask(new PointersUpdatingTask(
3254             isolate(),
3255             GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3256       }
3257       updating_job.Run(isolate()->async_counters());
3258       heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
3259     }
3260   }
3261 
3262   {
3263     TRACE_GC(heap()->tracer(),
3264              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3265     // Update pointers from external string table.
3266     heap_->UpdateReferencesInExternalStringTable(
3267         &UpdateReferenceInExternalStringTableEntry);
3268 
3269     EvacuationWeakObjectRetainer evacuation_object_retainer;
3270     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3271   }
3272 }
3273 
3274 void MarkCompactCollector::ReportAbortedEvacuationCandidate(
3275     HeapObject* failed_object, Page* page) {
3276   base::LockGuard<base::Mutex> guard(&mutex_);
3277 
3278   aborted_evacuation_candidates_.push_back(std::make_pair(failed_object, page));
3279 }
3280 
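// For every page whose compaction was aborted, the outdated old-to-new slots
// recorded up to the failed object are dropped, live bytes are recomputed,
// and slots are re-recorded from the still-marked objects. Aborted pages then
// return to regular use, while successfully evacuated pages are unlinked from
// their owner's page list and released later.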
3281 void MarkCompactCollector::PostProcessEvacuationCandidates() {
3282   for (auto object_and_page : aborted_evacuation_candidates_) {
3283     HeapObject* failed_object = object_and_page.first;
3284     Page* page = object_and_page.second;
3285     page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3286     // Aborted compaction page. We have to record slots here, since we
3287     // might not have recorded them in the first place.
3288 
3289     // Remove outdated slots.
3290     RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
3291                                            failed_object->address(),
3292                                            SlotSet::PREFREE_EMPTY_BUCKETS);
3293     RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3294                                                 failed_object->address());
3295     // Recompute live bytes.
3296     LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
3297     // Re-record slots.
3298     EvacuateRecordOnlyVisitor record_visitor(heap());
3299     LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3300                                                &record_visitor,
3301                                                LiveObjectVisitor::kKeepMarking);
3302     // Array buffers will be processed during pointer updating.
3303   }
3304   const int aborted_pages =
3305       static_cast<int>(aborted_evacuation_candidates_.size());
3306   int aborted_pages_verified = 0;
3307   for (Page* p : old_space_evacuation_pages_) {
3308     if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3309       // After clearing the evacuation candidate flag the page is again in a
3310       // regular state.
3311       p->ClearEvacuationCandidate();
3312       aborted_pages_verified++;
3313     } else {
3314       DCHECK(p->IsEvacuationCandidate());
3315       DCHECK(p->SweepingDone());
3316       p->owner()->memory_chunk_list().Remove(p);
3317     }
3318   }
3319   DCHECK_EQ(aborted_pages_verified, aborted_pages);
3320   if (FLAG_trace_evacuation && (aborted_pages > 0)) {
3321     PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
3322                  isolate()->time_millis_since_init(), aborted_pages);
3323   }
3324 }
3325 
3326 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3327   for (Page* p : old_space_evacuation_pages_) {
3328     if (!p->IsEvacuationCandidate()) continue;
3329     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3330     non_atomic_marking_state()->SetLiveBytes(p, 0);
3331     CHECK(p->SweepingDone());
3332     space->ReleasePage(p);
3333   }
3334   old_space_evacuation_pages_.clear();
3335   compacting_ = false;
3336 }
3337 
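// Prepares a paged space for sweeping: evacuation candidates are left to the
// evacuator, NEVER_ALLOCATE_ON_PAGE pages are swept eagerly to stay iterable,
// at most one completely empty page is kept (further empty pages are released
// immediately), and all remaining pages are handed to the sweeper.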
3338 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3339   space->ClearStats();
3340 
3341   int will_be_swept = 0;
3342   bool unused_page_present = false;
3343 
3344   // Loop needs to support deletion if live bytes == 0 for a page.
3345   for (auto it = space->begin(); it != space->end();) {
3346     Page* p = *(it++);
3347     DCHECK(p->SweepingDone());
3348 
3349     if (p->IsEvacuationCandidate()) {
3350       // Will be processed in Evacuate.
3351       DCHECK(!evacuation_candidates_.empty());
3352       continue;
3353     }
3354 
3355     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3356       // We need to sweep the page to get it into an iterable state again. Note
3357       // that this adds unusable memory into the free list that is later on
3358       // (in the free list) dropped again. Since we only use the flag for
3359       // testing, this is fine.
3360       p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
3361       sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3362                           Heap::ShouldZapGarbage()
3363                               ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
3364                               : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
3365       space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
3366       continue;
3367     }
3368 
3369     // One unused page is kept; all further unused pages are released before sweeping them.
3370     if (non_atomic_marking_state()->live_bytes(p) == 0) {
3371       if (unused_page_present) {
3372         if (FLAG_gc_verbose) {
3373           PrintIsolate(isolate(), "sweeping: released page: %p",
3374                        static_cast<void*>(p));
3375         }
3376         ArrayBufferTracker::FreeAll(p);
3377         space->memory_chunk_list().Remove(p);
3378         space->ReleasePage(p);
3379         continue;
3380       }
3381       unused_page_present = true;
3382     }
3383 
3384     sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
3385     will_be_swept++;
3386   }
3387 
3388   if (FLAG_gc_verbose) {
3389     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3390                  space->name(), will_be_swept);
3391   }
3392 }
3393 
3394 void MarkCompactCollector::StartSweepSpaces() {
3395   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3396 #ifdef DEBUG
3397   state_ = SWEEP_SPACES;
3398 #endif
3399 
3400   {
3401     {
3402       GCTracer::Scope sweep_scope(heap()->tracer(),
3403                                   GCTracer::Scope::MC_SWEEP_OLD);
3404       StartSweepSpace(heap()->old_space());
3405     }
3406     {
3407       GCTracer::Scope sweep_scope(heap()->tracer(),
3408                                   GCTracer::Scope::MC_SWEEP_CODE);
3409       StartSweepSpace(heap()->code_space());
3410     }
3411     {
3412       GCTracer::Scope sweep_scope(heap()->tracer(),
3413                                   GCTracer::Scope::MC_SWEEP_MAP);
3414       StartSweepSpace(heap()->map_space());
3415     }
3416     sweeper()->StartSweeping();
3417   }
3418 }
3419 
3420 void MarkCompactCollector::MarkingWorklist::PrintWorklist(
3421     const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
3422   std::map<InstanceType, int> count;
3423   int total_count = 0;
3424   worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
3425     ++total_count;
3426     count[obj->map()->instance_type()]++;
3427   });
3428   std::vector<std::pair<int, InstanceType>> rank;
3429   for (auto i : count) {
3430     rank.push_back(std::make_pair(i.second, i.first));
3431   }
3432   std::map<InstanceType, std::string> instance_type_name;
3433 #define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
3434   INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
3435 #undef INSTANCE_TYPE_NAME
3436   std::sort(rank.begin(), rank.end(),
3437             std::greater<std::pair<int, InstanceType>>());
3438   PrintF("Worklist %s: %d\n", worklist_name, total_count);
3439   for (auto i : rank) {
3440     PrintF("  [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
3441   }
3442 }
3443 
3444 #ifdef ENABLE_MINOR_MC
3445 
3446 namespace {
3447 
3448 #ifdef VERIFY_HEAP
3449 
3450 class YoungGenerationMarkingVerifier : public MarkingVerifier {
3451  public:
3452   explicit YoungGenerationMarkingVerifier(Heap* heap)
3453       : MarkingVerifier(heap),
3454         marking_state_(
3455             heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
3456 
3457   Bitmap* bitmap(const MemoryChunk* chunk) override {
3458     return marking_state_->bitmap(chunk);
3459   }
3460 
3461   bool IsMarked(HeapObject* object) override {
3462     return marking_state_->IsGrey(object);
3463   }
3464 
3465   bool IsBlackOrGrey(HeapObject* object) override {
3466     return marking_state_->IsBlackOrGrey(object);
3467   }
3468 
3469   void Run() override {
3470     VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3471     VerifyMarking(heap_->new_space());
3472   }
3473 
3474   void VerifyPointers(Object** start, Object** end) override {
3475     for (Object** current = start; current < end; current++) {
3476       DCHECK(!HasWeakHeapObjectTag(*current));
3477       if ((*current)->IsHeapObject()) {
3478         HeapObject* object = HeapObject::cast(*current);
3479         if (!Heap::InNewSpace(object)) return;
3480         CHECK(IsMarked(object));
3481       }
3482     }
3483   }
3484 
3485   void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
3486     for (MaybeObject** current = start; current < end; current++) {
3487       HeapObject* object;
3488       // Minor MC treats weak references as strong.
3489       if ((*current)->ToStrongOrWeakHeapObject(&object)) {
3490         if (!Heap::InNewSpace(object)) {
3491           continue;
3492         }
3493         CHECK(IsMarked(object));
3494       }
3495     }
3496   }
3497 
3498  private:
3499   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3500 };
3501 
3502 class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
3503  public:
3504   explicit YoungGenerationEvacuationVerifier(Heap* heap)
3505       : EvacuationVerifier(heap) {}
3506 
3507   void Run() override {
3508     VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3509     VerifyEvacuation(heap_->new_space());
3510     VerifyEvacuation(heap_->old_space());
3511     VerifyEvacuation(heap_->code_space());
3512     VerifyEvacuation(heap_->map_space());
3513   }
3514 
3515  protected:
3516   void VerifyPointers(Object** start, Object** end) override {
3517     for (Object** current = start; current < end; current++) {
3518       if ((*current)->IsHeapObject()) {
3519         HeapObject* object = HeapObject::cast(*current);
3520         CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
3521       }
3522     }
3523   }
3524   void VerifyPointers(MaybeObject** start, MaybeObject** end) override {
3525     for (MaybeObject** current = start; current < end; current++) {
3526       HeapObject* object;
3527       if ((*current)->ToStrongOrWeakHeapObject(&object)) {
3528         CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
3529       }
3530     }
3531   }
3532 };
3533 
3534 #endif  // VERIFY_HEAP
3535 
3536 template <class ParallelItem>
3537 void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
3538                        ItemParallelJob* job) {
3539   // Create batches of global handles.
3540   const size_t kGlobalHandlesBufferSize = 1000;
3541   const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
3542   for (size_t start = 0; start < new_space_nodes;
3543        start += kGlobalHandlesBufferSize) {
3544     size_t end = start + kGlobalHandlesBufferSize;
3545     if (end > new_space_nodes) end = new_space_nodes;
3546     job->AddItem(new ParallelItem(heap, global_handles, start, end));
3547   }
3548 }
3549 
3550 bool IsUnmarkedObjectForYoungGeneration(Heap* heap, Object** p) {
3551   DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
3552   return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
3553                                       ->non_atomic_marking_state()
3554                                       ->IsGrey(HeapObject::cast(*p));
3555 }
3556 
3557 }  // namespace
3558 
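// Visitor used by the minor collector's marking tasks: new space objects
// reachable through strong (and, for now, weak) slots are marked grey and
// pushed onto the per-task view of the global marking worklist.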
3559 class YoungGenerationMarkingVisitor final
3560     : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
3561  public:
3562   YoungGenerationMarkingVisitor(
3563       MinorMarkCompactCollector::MarkingState* marking_state,
3564       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
3565       : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
3566 
3567   V8_INLINE void VisitPointers(HeapObject* host, Object** start,
3568                                Object** end) final {
3569     for (Object** p = start; p < end; p++) {
3570       VisitPointer(host, p);
3571     }
3572   }
3573 
3574   V8_INLINE void VisitPointers(HeapObject* host, MaybeObject** start,
3575                                MaybeObject** end) final {
3576     for (MaybeObject** p = start; p < end; p++) {
3577       VisitPointer(host, p);
3578     }
3579   }
3580 
3581   V8_INLINE void VisitPointer(HeapObject* host, Object** slot) final {
3582     Object* target = *slot;
3583     DCHECK(!HasWeakHeapObjectTag(target));
3584     if (Heap::InNewSpace(target)) {
3585       HeapObject* target_object = HeapObject::cast(target);
3586       MarkObjectViaMarkingWorklist(target_object);
3587     }
3588   }
3589 
3590   V8_INLINE void VisitPointer(HeapObject* host, MaybeObject** slot) final {
3591     MaybeObject* target = *slot;
3592     if (Heap::InNewSpace(target)) {
3593       HeapObject* target_object;
3594       // Treat weak references as strong. TODO(marja): Proper weakness handling
3595       // for minor-mcs.
3596       if (target->ToStrongOrWeakHeapObject(&target_object)) {
3597         MarkObjectViaMarkingWorklist(target_object);
3598       }
3599     }
3600   }
3601 
3602  private:
3603   inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
3604     if (marking_state_->WhiteToGrey(object)) {
3605       // Marking deque overflow is unsupported for the young generation.
3606       CHECK(worklist_.Push(object));
3607     }
3608   }
3609 
3610   MinorMarkCompactCollector::MarkingWorklist::View worklist_;
3611   MinorMarkCompactCollector::MarkingState* marking_state_;
3612 };
3613 
3614 void MinorMarkCompactCollector::SetUp() {}
3615 
3616 void MinorMarkCompactCollector::TearDown() {}
3617 
3618 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
3619     : MarkCompactCollectorBase(heap),
3620       worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
3621       main_marking_visitor_(new YoungGenerationMarkingVisitor(
3622           marking_state(), worklist_, kMainMarker)),
3623       page_parallel_job_semaphore_(0) {
3624   static_assert(
3625       kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
3626       "more marker tasks than marking deque can handle");
3627 }
3628 
3629 MinorMarkCompactCollector::~MinorMarkCompactCollector() {
3630   delete worklist_;
3631   delete main_marking_visitor_;
3632 }
3633 
3634 int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
3635   DCHECK_GT(pages, 0);
3636   if (!FLAG_minor_mc_parallel_marking) return 1;
3637   // Pages are not private to markers but we can still use them to estimate the
3638   // amount of marking that is required.
3639   const int kPagesPerTask = 2;
3640   const int wanted_tasks = Max(1, pages / kPagesPerTask);
3641   return Min(NumberOfAvailableCores(),
3642              Min(wanted_tasks,
3643                  MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
3644 }
3645 
3646 void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
3647   for (Page* p : sweep_to_iterate_pages_) {
3648     if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
3649       p->ClearFlag(Page::SWEEP_TO_ITERATE);
3650       non_atomic_marking_state()->ClearLiveness(p);
3651     }
3652   }
3653   sweep_to_iterate_pages_.clear();
3654 }
3655 
3656 class YoungGenerationMigrationObserver final : public MigrationObserver {
3657  public:
3658   YoungGenerationMigrationObserver(Heap* heap,
3659                                    MarkCompactCollector* mark_compact_collector)
3660       : MigrationObserver(heap),
3661         mark_compact_collector_(mark_compact_collector) {}
3662 
3663   inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
3664                    int size) final {
3665     // Migrate color to old generation marking in case the object survived young
3666     // generation garbage collection.
3667     if (heap_->incremental_marking()->IsMarking()) {
3668       DCHECK(
3669           heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
3670       heap_->incremental_marking()->TransferColor(src, dst);
3671     }
3672   }
3673 
3674  protected:
3675   base::Mutex mutex_;
3676   MarkCompactCollector* mark_compact_collector_;
3677 };
3678 
3679 class YoungGenerationRecordMigratedSlotVisitor final
3680     : public RecordMigratedSlotVisitor {
3681  public:
3682   explicit YoungGenerationRecordMigratedSlotVisitor(
3683       MarkCompactCollector* collector)
3684       : RecordMigratedSlotVisitor(collector) {}
3685 
3686   void VisitCodeTarget(Code* host, RelocInfo* rinfo) final { UNREACHABLE(); }
3687   void VisitEmbeddedPointer(Code* host, RelocInfo* rinfo) final {
3688     UNREACHABLE();
3689   }
3690 
3691  private:
3692   // Only record slots for host objects that are considered as live by the full
3693   // collector.
3694   inline bool IsLive(HeapObject* object) {
3695     return collector_->non_atomic_marking_state()->IsBlack(object);
3696   }
3697 
3698   inline void RecordMigratedSlot(HeapObject* host, MaybeObject* value,
3699                                  Address slot) final {
3700     if (value->IsStrongOrWeakHeapObject()) {
3701       Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
3702       if (p->InNewSpace()) {
3703         DCHECK_IMPLIES(p->InToSpace(),
3704                        p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
3705         RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
3706             Page::FromAddress(slot), slot);
3707       } else if (p->IsEvacuationCandidate() && IsLive(host)) {
3708         RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
3709             Page::FromAddress(slot), slot);
3710       }
3711     }
3712   }
3713 };
3714 
3715 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
3716   TRACE_GC(heap()->tracer(),
3717            GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
3718 
3719   PointersUpdatingVisitor updating_visitor(heap());
3720   ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3721                                &page_parallel_job_semaphore_);
3722 
3723   CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3724   // Create batches of global handles.
3725   SeedGlobalHandles<GlobalHandlesUpdatingItem>(
3726       heap(), isolate()->global_handles(), &updating_job);
3727   const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3728   int remembered_set_pages = 0;
3729   remembered_set_pages += CollectRememberedSetUpdatingItems(
3730       &updating_job, heap()->old_space(),
3731       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3732   remembered_set_pages += CollectRememberedSetUpdatingItems(
3733       &updating_job, heap()->code_space(),
3734       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3735   remembered_set_pages += CollectRememberedSetUpdatingItems(
3736       &updating_job, heap()->map_space(),
3737       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3738   remembered_set_pages += CollectRememberedSetUpdatingItems(
3739       &updating_job, heap()->lo_space(),
3740       RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3741   const int remembered_set_tasks =
3742       remembered_set_pages == 0 ? 0
3743                                 : NumberOfParallelPointerUpdateTasks(
3744                                       remembered_set_pages, old_to_new_slots_);
3745   const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3746   for (int i = 0; i < num_tasks; i++) {
3747     updating_job.AddTask(new PointersUpdatingTask(
3748         isolate(), GCTracer::BackgroundScope::
3749                        MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3750   }
3751 
3752   {
3753     TRACE_GC(heap()->tracer(),
3754              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3755     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
3756   }
3757   {
3758     TRACE_GC(heap()->tracer(),
3759              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
3760     updating_job.Run(isolate()->async_counters());
3761     heap()->array_buffer_collector()->FreeAllocationsOnBackgroundThread();
3762   }
3763 
3764   {
3765     TRACE_GC(heap()->tracer(),
3766              GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
3767 
3768     EvacuationWeakObjectRetainer evacuation_object_retainer;
3769     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3770 
3771     // Update pointers from external string table.
3772     heap()->UpdateNewSpaceReferencesInExternalStringTable(
3773         &UpdateReferenceInExternalStringTableEntry);
3774   }
3775 }
3776 
3777 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
3778  public:
3779   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
3780       : collector_(collector) {}
3781 
3782   void VisitRootPointer(Root root, const char* description, Object** p) final {
3783     MarkObjectByPointer(p);
3784   }
3785 
3786   void VisitRootPointers(Root root, const char* description, Object** start,
3787                          Object** end) final {
3788     for (Object** p = start; p < end; p++) {
3789       MarkObjectByPointer(p);
3790     }
3791   }
3792 
3793  private:
3794   V8_INLINE void MarkObjectByPointer(Object** p) {
3795     if (!(*p)->IsHeapObject()) return;
3796     collector_->MarkRootObject(HeapObject::cast(*p));
3797   }
3798   MinorMarkCompactCollector* const collector_;
3799 };
3800 
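// Entry point for a young generation collection: finish any sweeping needed
// for iterability, mark live objects, clear dead references, evacuate, update
// the incremental marking worklist, reset liveness of from-space pages, and
// trim the old-to-new remembered set.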
3801 void MinorMarkCompactCollector::CollectGarbage() {
3802   {
3803     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
3804     heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
3805     CleanupSweepToIteratePages();
3806   }
3807 
3808   MarkLiveObjects();
3809   ClearNonLiveReferences();
3810 #ifdef VERIFY_HEAP
3811   if (FLAG_verify_heap) {
3812     YoungGenerationMarkingVerifier verifier(heap());
3813     verifier.Run();
3814   }
3815 #endif  // VERIFY_HEAP
3816 
3817   Evacuate();
3818 #ifdef VERIFY_HEAP
3819   if (FLAG_verify_heap) {
3820     YoungGenerationEvacuationVerifier verifier(heap());
3821     verifier.Run();
3822   }
3823 #endif  // VERIFY_HEAP
3824 
3825   {
3826     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
3827     heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
3828   }
3829 
3830   {
3831     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
3832     for (Page* p :
3833          PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
3834       DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
3835       non_atomic_marking_state()->ClearLiveness(p);
3836       if (FLAG_concurrent_marking) {
3837         // Ensure that concurrent marker does not track pages that are
3838         // going to be unmapped.
3839         heap()->concurrent_marking()->ClearLiveness(p);
3840       }
3841     }
3842   }
3843 
3844   RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
3845       heap(), [](MemoryChunk* chunk) {
3846         if (chunk->SweepingDone()) {
3847           RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
3848         } else {
3849           RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
3850         }
3851       });
3852 
3853   heap()->account_external_memory_concurrently_freed();
3854 }
3855 
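// Makes a sweep-to-iterate page iterable again: gaps between grey (live)
// objects are filled with filler objects, the corresponding ranges in the
// full collector's marking bitmap are cleared, and the page's own liveness
// information is reset when requested.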
3856 void MinorMarkCompactCollector::MakeIterable(
3857     Page* p, MarkingTreatmentMode marking_mode,
3858     FreeSpaceTreatmentMode free_space_mode) {
3859   // We have to clear the full collector's markbits for the areas that we
3860   // remove here.
3861   MarkCompactCollector* full_collector = heap()->mark_compact_collector();
3862   Address free_start = p->area_start();
3863   DCHECK_EQ(0, free_start % (32 * kPointerSize));
3864 
3865   for (auto object_and_size :
3866        LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
3867     HeapObject* const object = object_and_size.first;
3868     DCHECK(non_atomic_marking_state()->IsGrey(object));
3869     Address free_end = object->address();
3870     if (free_end != free_start) {
3871       CHECK_GT(free_end, free_start);
3872       size_t size = static_cast<size_t>(free_end - free_start);
3873       full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
3874           p->AddressToMarkbitIndex(free_start),
3875           p->AddressToMarkbitIndex(free_end));
3876       if (free_space_mode == ZAP_FREE_SPACE) {
3877         ZapCode(free_start, size);
3878       }
3879       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3880                                       ClearRecordedSlots::kNo);
3881     }
3882     Map* map = object->synchronized_map();
3883     int size = object->SizeFromMap(map);
3884     free_start = free_end + size;
3885   }
3886 
3887   if (free_start != p->area_end()) {
3888     CHECK_GT(p->area_end(), free_start);
3889     size_t size = static_cast<size_t>(p->area_end() - free_start);
3890     full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
3891         p->AddressToMarkbitIndex(free_start),
3892         p->AddressToMarkbitIndex(p->area_end()));
3893     if (free_space_mode == ZAP_FREE_SPACE) {
3894       ZapCode(free_start, size);
3895     }
3896     p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3897                                     ClearRecordedSlots::kNo);
3898   }
3899 
3900   if (marking_mode == MarkingTreatmentMode::CLEAR) {
3901     non_atomic_marking_state()->ClearLiveness(p);
3902     p->ClearFlag(Page::SWEEP_TO_ITERATE);
3903   }
3904 }
3905 
3906 namespace {
3907 
3908 // Helper class for pruning the string table.
3909 class YoungGenerationExternalStringTableCleaner : public RootVisitor {
3910  public:
3911   YoungGenerationExternalStringTableCleaner(
3912       MinorMarkCompactCollector* collector)
3913       : heap_(collector->heap()),
3914         marking_state_(collector->non_atomic_marking_state()) {}
3915 
3916   void VisitRootPointers(Root root, const char* description, Object** start,
3917                          Object** end) override {
3918     DCHECK_EQ(static_cast<int>(root),
3919               static_cast<int>(Root::kExternalStringsTable));
3920     // Visit all HeapObject pointers in [start, end).
3921     for (Object** p = start; p < end; p++) {
3922       Object* o = *p;
3923       if (o->IsHeapObject()) {
3924         HeapObject* heap_object = HeapObject::cast(o);
3925         if (marking_state_->IsWhite(heap_object)) {
3926           if (o->IsExternalString()) {
3927             heap_->FinalizeExternalString(String::cast(*p));
3928           } else {
3929             // The original external string may have been internalized.
3930             DCHECK(o->IsThinString());
3931           }
3932           // Set the entry to the_hole_value (as deleted).
3933           *p = ReadOnlyRoots(heap_).the_hole_value();
3934         }
3935       }
3936     }
3937   }
3938 
3939  private:
3940   Heap* heap_;
3941   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3942 };
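
// Added note: an entry is considered dead when its string is still white
// after young-generation marking. For external strings the external resource
// is released via FinalizeExternalString(); in either case the table slot is
// overwritten with the hole value so that the subsequent
// CleanUpNewSpaceStrings() pass (see ClearNonLiveReferences() below) can drop
// the deleted entries.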
3943 
3944 // Marked young generation objects and all old generation objects will be
3945 // retained.
3946 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
3947  public:
3948   explicit MinorMarkCompactWeakObjectRetainer(
3949       MinorMarkCompactCollector* collector)
3950       : marking_state_(collector->non_atomic_marking_state()) {}
3951 
3952   virtual Object* RetainAs(Object* object) {
3953     HeapObject* heap_object = HeapObject::cast(object);
3954     if (!Heap::InNewSpace(heap_object)) return object;
3955 
3956     // Young generation marking only marks to grey instead of black.
3957     DCHECK(!marking_state_->IsBlack(heap_object));
3958     if (marking_state_->IsGrey(heap_object)) {
3959       return object;
3960     }
3961     return nullptr;
3962   }
3963 
3964  private:
3965   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3966 };
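
// Added summary of the retention rule above: objects outside the young
// generation are always retained; grey young-generation objects (i.e. those
// marked by the minor collector) are retained; white young-generation objects
// are dropped by returning nullptr, which unlinks them from the weak list
// being processed.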
3967 
3968 }  // namespace
3969 
3970 void MinorMarkCompactCollector::ClearNonLiveReferences() {
3971   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
3972 
3973   {
3974     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
3975     // Internalized strings are always stored in old space, so there is no need
3976     // to clean them here.
3977     YoungGenerationExternalStringTableCleaner external_visitor(this);
3978     heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
3979     heap()->external_string_table_.CleanUpNewSpaceStrings();
3980   }
3981 
3982   {
3983     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
3984     // Process the weak references.
3985     MinorMarkCompactWeakObjectRetainer retainer(this);
3986     heap()->ProcessYoungWeakReferences(&retainer);
3987   }
3988 }
3989 
3990 void MinorMarkCompactCollector::EvacuatePrologue() {
3991   NewSpace* new_space = heap()->new_space();
3992   // Append the list of new space pages to be processed.
3993   for (Page* p :
3994        PageRange(new_space->first_allocatable_address(), new_space->top())) {
3995     new_space_evacuation_pages_.push_back(p);
3996   }
3997   new_space->Flip();
3998   new_space->ResetLinearAllocationArea();
3999 }
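
// Added note: EvacuatePrologue() records the allocatable new-space pages
// before NewSpace::Flip() swaps the semispaces. After the flip, the recorded
// pages form the from-space to evacuate, while the now-empty to-space (with a
// freshly reset linear allocation area) receives surviving objects that stay
// in the young generation.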
4000 
4001 void MinorMarkCompactCollector::EvacuateEpilogue() {
4002   heap()->new_space()->set_age_mark(heap()->new_space()->top());
4003   // Give pages that are queued to be freed back to the OS.
4004   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4005 }
4006 
4007 UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
4008     MemoryChunk* chunk, Address start, Address end) {
4009   return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
4010       chunk, start, end, non_atomic_marking_state());
4011 }
4012 
4013 UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
4014     MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4015   return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
4016       heap(), non_atomic_marking_state(), chunk, updating_mode);
4017 }
4018 
4019 class MarkingItem;
4020 class GlobalHandlesMarkingItem;
4021 class PageMarkingItem;
4022 class RootMarkingItem;
4023 class YoungGenerationMarkingTask;
4024 
4025 class MarkingItem : public ItemParallelJob::Item {
4026  public:
4027   virtual ~MarkingItem() {}
4028   virtual void Process(YoungGenerationMarkingTask* task) = 0;
4029 };
4030 
4031 class YoungGenerationMarkingTask : public ItemParallelJob::Task {
4032  public:
4033   YoungGenerationMarkingTask(
4034       Isolate* isolate, MinorMarkCompactCollector* collector,
4035       MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4036       : ItemParallelJob::Task(isolate),
4037         collector_(collector),
4038         marking_worklist_(global_worklist, task_id),
4039         marking_state_(collector->marking_state()),
4040         visitor_(marking_state_, global_worklist, task_id) {
4041     local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
4042                               Page::kPageSize);
4043   }
4044 
4045   void RunInParallel() override {
4046     TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
4047                         GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
4048     double marking_time = 0.0;
4049     {
4050       TimedScope scope(&marking_time);
4051       MarkingItem* item = nullptr;
4052       while ((item = GetItem<MarkingItem>()) != nullptr) {
4053         item->Process(this);
4054         item->MarkFinished();
4055         EmptyLocalMarkingWorklist();
4056       }
4057       EmptyMarkingWorklist();
4058       DCHECK(marking_worklist_.IsLocalEmpty());
4059       FlushLiveBytes();
4060     }
4061     if (FLAG_trace_minor_mc_parallel_marking) {
4062       PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
4063                    static_cast<void*>(this), marking_time);
4064     }
4065   }
4066 
4067   void MarkObject(Object* object) {
4068     if (!Heap::InNewSpace(object)) return;
4069     HeapObject* heap_object = HeapObject::cast(object);
4070     if (marking_state_->WhiteToGrey(heap_object)) {
4071       const int size = visitor_.Visit(heap_object);
4072       IncrementLiveBytes(heap_object, size);
4073     }
4074   }
4075 
4076  private:
4077   void EmptyLocalMarkingWorklist() {
4078     HeapObject* object = nullptr;
4079     while (marking_worklist_.Pop(&object)) {
4080       const int size = visitor_.Visit(object);
4081       IncrementLiveBytes(object, size);
4082     }
4083   }
4084 
4085   void EmptyMarkingWorklist() {
4086     HeapObject* object = nullptr;
4087     while (marking_worklist_.Pop(&object)) {
4088       const int size = visitor_.Visit(object);
4089       IncrementLiveBytes(object, size);
4090     }
4091   }
4092 
4093   void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
4094     local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
4095         bytes;
4096   }
4097 
4098   void FlushLiveBytes() {
4099     for (auto pair : local_live_bytes_) {
4100       marking_state_->IncrementLiveBytes(pair.first, pair.second);
4101     }
4102   }
4103 
4104   MinorMarkCompactCollector* collector_;
4105   MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
4106   MinorMarkCompactCollector::MarkingState* marking_state_;
4107   YoungGenerationMarkingVisitor visitor_;
4108   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
4109 };
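
// Added note on the live-bytes buffering above: each task accumulates live
// bytes per page in its private local_live_bytes_ map and publishes the
// totals once in FlushLiveBytes(), instead of touching the shared marking
// state for every visited object. A minimal sketch of the pattern (names as
// used above):
//
//   std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes;
//   local_live_bytes[page] += object_size;   // task-local, no synchronization
//   ...
//   for (auto pair : local_live_bytes)       // once per task, at the end
//     marking_state->IncrementLiveBytes(pair.first, pair.second);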
4110 
4111 class PageMarkingItem : public MarkingItem {
4112  public:
4113   explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
4114       : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
4115   virtual ~PageMarkingItem() { *global_slots_ = *global_slots_ + slots_; }
4116 
4117   void Process(YoungGenerationMarkingTask* task) override {
4118     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4119                  "PageMarkingItem::Process");
4120     base::LockGuard<base::Mutex> guard(chunk_->mutex());
4121     MarkUntypedPointers(task);
4122     MarkTypedPointers(task);
4123   }
4124 
4125  private:
4126   inline Heap* heap() { return chunk_->heap(); }
4127 
4128   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
4129     RememberedSet<OLD_TO_NEW>::Iterate(
4130         chunk_,
4131         [this, task](Address slot) { return CheckAndMarkObject(task, slot); },
4132         SlotSet::PREFREE_EMPTY_BUCKETS);
4133   }
4134 
4135   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
4136     RememberedSet<OLD_TO_NEW>::IterateTyped(
4137         chunk_,
4138         [this, task](SlotType slot_type, Address host_addr, Address slot) {
4139           return UpdateTypedSlotHelper::UpdateTypedSlot(
4140               heap(), slot_type, slot, [this, task](MaybeObject** slot) {
4141                 return CheckAndMarkObject(task,
4142                                           reinterpret_cast<Address>(slot));
4143               });
4144         });
4145   }
4146 
4147   SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
4148                                         Address slot_address) {
4149     MaybeObject* object = *reinterpret_cast<MaybeObject**>(slot_address);
4150     if (Heap::InNewSpace(object)) {
4151       // Marking happens before flipping the young generation, so the object
4152       // has to be in ToSpace.
4153       DCHECK(Heap::InToSpace(object));
4154       HeapObject* heap_object;
4155       bool success = object->ToStrongOrWeakHeapObject(&heap_object);
4156       USE(success);
4157       DCHECK(success);
4158       task->MarkObject(heap_object);
4159       slots_++;
4160       return KEEP_SLOT;
4161     }
4162     return REMOVE_SLOT;
4163   }
4164 
4165   MemoryChunk* chunk_;
4166   std::atomic<int>* global_slots_;
4167   int slots_;
4168 };
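
// Added note on the slot protocol above: the callback handed to
// RememberedSet<OLD_TO_NEW>::Iterate returns KEEP_SLOT for slots that still
// point into the young generation (they are needed again when pointers are
// updated after evacuation) and REMOVE_SLOT for slots whose target is not in
// new space. Each PageMarkingItem counts the slots it kept and folds that
// count into the shared *global_slots_ counter in its destructor.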
4169 
4170 class GlobalHandlesMarkingItem : public MarkingItem {
4171  public:
4172   GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
4173                            size_t start, size_t end)
4174       : global_handles_(global_handles), start_(start), end_(end) {}
4175   virtual ~GlobalHandlesMarkingItem() {}
4176 
4177   void Process(YoungGenerationMarkingTask* task) override {
4178     TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4179                  "GlobalHandlesMarkingItem::Process");
4180     GlobalHandlesRootMarkingVisitor visitor(task);
4181     global_handles_
4182         ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
4183             &visitor, start_, end_);
4184   }
4185 
4186  private:
4187   class GlobalHandlesRootMarkingVisitor : public RootVisitor {
4188    public:
4189     explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
4190         : task_(task) {}
4191 
4192     void VisitRootPointer(Root root, const char* description,
4193                           Object** p) override {
4194       DCHECK_EQ(Root::kGlobalHandles, root);
4195       task_->MarkObject(*p);
4196     }
4197 
4198     void VisitRootPointers(Root root, const char* description, Object** start,
4199                            Object** end) override {
4200       DCHECK_EQ(Root::kGlobalHandles, root);
4201       for (Object** p = start; p < end; p++) {
4202         task_->MarkObject(*p);
4203       }
4204     }
4205 
4206    private:
4207     YoungGenerationMarkingTask* task_;
4208   };
4209 
4210   GlobalHandles* global_handles_;
4211   size_t start_;
4212   size_t end_;
4213 };
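
// Added note: each GlobalHandlesMarkingItem covers the half-open handle range
// [start_, end_), so the new-space global-handle roots can be split into
// several chunks that are marked in parallel by MarkRootSetInParallel() below.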
4214 
4215 void MinorMarkCompactCollector::MarkRootSetInParallel(
4216     RootMarkingVisitor* root_visitor) {
4217   std::atomic<int> slots;
4218   {
4219     ItemParallelJob job(isolate()->cancelable_task_manager(),
4220                         &page_parallel_job_semaphore_);
4221 
4222     // Seed the root set (roots + old->new set).
4223     {
4224       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
4225       heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
4226       // Create batches of global handles.
4227       SeedGlobalHandles<GlobalHandlesMarkingItem>(
4228           heap(), isolate()->global_handles(), &job);
4229       // Create items for each page.
4230       RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4231           heap(), [&job, &slots](MemoryChunk* chunk) {
4232             job.AddItem(new PageMarkingItem(chunk, &slots));
4233           });
4234     }
4235 
4236     // Add tasks and run in parallel.
4237     {
4238       TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
4239       const int new_space_pages =
4240           static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
4241       const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
4242       for (int i = 0; i < num_tasks; i++) {
4243         job.AddTask(
4244             new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
4245       }
4246       job.Run(isolate()->async_counters());
4247       DCHECK(worklist()->IsEmpty());
4248     }
4249   }
4250   old_to_new_slots_ = slots;
4251 }
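
// Added note: the slots counter above is shared by all PageMarkingItems; each
// item adds its per-chunk total when it is destroyed together with the job,
// and the final value is then cached in old_to_new_slots_.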
4252 
4253 void MinorMarkCompactCollector::MarkLiveObjects() {
4254   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
4255 
4256   PostponeInterruptsScope postpone(isolate());
4257 
4258   RootMarkingVisitor root_visitor(this);
4259 
4260   MarkRootSetInParallel(&root_visitor);
4261 
4262   // Mark the remaining objects on the main thread.
4263   {
4264     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
4265     ProcessMarkingWorklist();
4266   }
4267 
4268   {
4269     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
4270     isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
4271         &IsUnmarkedObjectForYoungGeneration);
4272     isolate()
4273         ->global_handles()
4274         ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
4275     isolate()
4276         ->global_handles()
4277         ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
4278             &root_visitor, &IsUnmarkedObjectForYoungGeneration);
4279     ProcessMarkingWorklist();
4280   }
4281 }
4282 
4283 void MinorMarkCompactCollector::ProcessMarkingWorklist() {
4284   MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
4285   HeapObject* object = nullptr;
4286   while (marking_worklist.Pop(&object)) {
4287     DCHECK(!object->IsFiller());
4288     DCHECK(object->IsHeapObject());
4289     DCHECK(heap()->Contains(object));
4290     DCHECK(non_atomic_marking_state()->IsGrey(object));
4291     main_marking_visitor()->Visit(object);
4292   }
4293   DCHECK(marking_worklist.IsLocalEmpty());
4294 }
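
// Added note: ProcessMarkingWorklist() drains the main-thread view of the
// worklist to a fixpoint. Visiting an object may push newly discovered
// objects (just turned grey) back onto the same worklist, so the loop only
// terminates once no unprocessed grey young-generation object remains.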
4295 
4296 void MinorMarkCompactCollector::Evacuate() {
4297   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
4298   base::LockGuard<base::Mutex> guard(heap()->relocation_mutex());
4299 
4300   {
4301     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
4302     EvacuatePrologue();
4303   }
4304 
4305   {
4306     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
4307     EvacuatePagesInParallel();
4308   }
4309 
4310   UpdatePointersAfterEvacuation();
4311 
4312   {
4313     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
4314     if (!heap()->new_space()->Rebalance()) {
4315       heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4316     }
4317   }
4318 
4319   {
4320     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
4321     for (Page* p : new_space_evacuation_pages_) {
4322       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
4323           p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4324         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
4325         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4326         p->SetFlag(Page::SWEEP_TO_ITERATE);
4327         sweep_to_iterate_pages_.push_back(p);
4328       }
4329     }
4330     new_space_evacuation_pages_.clear();
4331   }
4332 
4333   {
4334     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
4335     EvacuateEpilogue();
4336   }
4337 }
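
// Added note on the clean-up step above: pages that were promoted in place
// (PAGE_NEW_OLD_PROMOTION or PAGE_NEW_NEW_PROMOTION) keep their live objects
// where they are. They are tagged with SWEEP_TO_ITERATE and remembered in
// sweep_to_iterate_pages_, and MakeIterable() later turns the dead gaps on
// such pages into filler objects so linear iteration stays possible.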
4338 
4339 namespace {
4340 
4341 class YoungGenerationEvacuator : public Evacuator {
4342  public:
4343   YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
4344                            RecordMigratedSlotVisitor* record_visitor)
4345       : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
4346 
4347   GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
4348     return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
4349   }
4350 
4351  protected:
4352   void RawEvacuatePage(Page* page, intptr_t* live_bytes) override;
4353 
4354   MinorMarkCompactCollector* collector_;
4355 };
4356 
4357 void YoungGenerationEvacuator::RawEvacuatePage(Page* page,
4358                                                intptr_t* live_bytes) {
4359   TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4360                "YoungGenerationEvacuator::RawEvacuatePage");
4361   MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
4362       collector_->non_atomic_marking_state();
4363   *live_bytes = marking_state->live_bytes(page);
4364   switch (ComputeEvacuationMode(page)) {
4365     case kObjectsNewToOld:
4366       LiveObjectVisitor::VisitGreyObjectsNoFail(
4367           page, marking_state, &new_space_visitor_,
4368           LiveObjectVisitor::kClearMarkbits);
4369       // The ArrayBufferTracker will be updated during the pointer-updating
4370       // phase.
4370       break;
4371     case kPageNewToOld:
4372       LiveObjectVisitor::VisitGreyObjectsNoFail(
4373           page, marking_state, &new_to_old_page_visitor_,
4374           LiveObjectVisitor::kKeepMarking);
4375       new_to_old_page_visitor_.account_moved_bytes(
4376           marking_state->live_bytes(page));
4377       // TODO(mlippautz): If cleaning array buffers is too slow here we can
4378       // delay it until the next GC.
4379       ArrayBufferTracker::FreeDead(page, marking_state);
4380       if (heap()->ShouldZapGarbage()) {
4381         collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
4382                                  ZAP_FREE_SPACE);
4383       } else if (heap()->incremental_marking()->IsMarking()) {
4384         // When incremental marking is on, we need to clear the full
4385         // collector's mark bits. We cannot yet discard the young generation
4386         // mark bits, as they are still needed for updating pointers.
4387         collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
4388                                  IGNORE_FREE_SPACE);
4389       }
4390       break;
4391     case kPageNewToNew:
4392       LiveObjectVisitor::VisitGreyObjectsNoFail(
4393           page, marking_state, &new_to_new_page_visitor_,
4394           LiveObjectVisitor::kKeepMarking);
4395       new_to_new_page_visitor_.account_moved_bytes(
4396           marking_state->live_bytes(page));
4397       // TODO(mlippautz): If cleaning array buffers is too slow here we can
4398       // delay it until the next GC.
4399       ArrayBufferTracker::FreeDead(page, marking_state);
4400       if (heap()->ShouldZapGarbage()) {
4401         collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
4402                                  ZAP_FREE_SPACE);
4403       } else if (heap()->incremental_marking()->IsMarking()) {
4404         // When incremental marking is on, we need to clear the full
4405         // collector's mark bits. We cannot yet discard the young generation
4406         // mark bits, as they are still needed for updating pointers.
4407         collector_->MakeIterable(page, MarkingTreatmentMode::KEEP,
4408                                  IGNORE_FREE_SPACE);
4409       }
4410       break;
4411     case kObjectsOldToOld:
4412       UNREACHABLE();
4413       break;
4414   }
4415 }
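
// Added summary of the dispatch above: kObjectsNewToOld copies live objects
// off the page, whereas kPageNewToOld and kPageNewToNew leave the objects in
// place and only account the moved bytes, free dead array buffers, and make
// the page iterable again (zapping free space when garbage zapping is
// enabled). kObjectsOldToOld cannot occur during a minor collection.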
4416 
4417 }  // namespace
4418 
4419 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
4420   ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
4421                                  &page_parallel_job_semaphore_);
4422   intptr_t live_bytes = 0;
4423 
4424   for (Page* page : new_space_evacuation_pages_) {
4425     intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
4426     if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
4427     live_bytes += live_bytes_on_page;
4428     if (ShouldMovePage(page, live_bytes_on_page)) {
4429       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
4430         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
4431       } else {
4432         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
4433       }
4434     }
4435     evacuation_job.AddItem(new PageEvacuationItem(page));
4436   }
4437   if (evacuation_job.NumberOfItems() == 0) return;
4438 
4439   YoungGenerationMigrationObserver observer(heap(),
4440                                             heap()->mark_compact_collector());
4441   YoungGenerationRecordMigratedSlotVisitor record_visitor(
4442       heap()->mark_compact_collector());
4443   CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
4444       this, &evacuation_job, &record_visitor, &observer, live_bytes);
4445 }
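
// Added note on the page-move heuristic above: pages with no live bytes and
// no array buffers are skipped entirely. If ShouldMovePage() decides to move
// a whole page, pages below the age mark (NEW_SPACE_BELOW_AGE_MARK) are
// promoted to old space and the remaining ones stay in new space as
// new-to-new promotions; everything else is evacuated object by object.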
4446 
4447 int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4448     ItemParallelJob* job) {
4449   int pages = 0;
4450   for (Page* p : new_space_evacuation_pages_) {
4451     if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4452       if (p->local_tracker() == nullptr) continue;
4453 
4454       pages++;
4455       job->AddItem(new ArrayBufferTrackerUpdatingItem(
4456           p, ArrayBufferTrackerUpdatingItem::kRegular));
4457     }
4458   }
4459   return pages;
4460 }
4461 
4462 #endif  // ENABLE_MINOR_MC
4463 
4464 }  // namespace internal
4465 }  // namespace v8
4466