1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting-inl.h"
23 #include "src/heap/objects-visiting.h"
24 #include "src/heap/page-parallel-job.h"
25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h"
28 #include "src/tracing/tracing-category-observer.h"
29 #include "src/utils-inl.h"
30 #include "src/v8.h"
31 #include "src/v8threads.h"
32 
33 namespace v8 {
34 namespace internal {
35 
36 
37 const char* Marking::kWhiteBitPattern = "00";
38 const char* Marking::kBlackBitPattern = "11";
39 const char* Marking::kGreyBitPattern = "10";
40 const char* Marking::kImpossibleBitPattern = "01";
41 
42 // The following has to hold in order for {ObjectMarking::MarkBitFrom} to not
43 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
44 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
45 
46 
47 // -------------------------------------------------------------------------
48 // MarkCompactCollector
49 
50 MarkCompactCollector::MarkCompactCollector(Heap* heap)
51     :  // NOLINT
52       heap_(heap),
53       page_parallel_job_semaphore_(0),
54 #ifdef DEBUG
55       state_(IDLE),
56 #endif
57       was_marked_incrementally_(false),
58       evacuation_(false),
59       compacting_(false),
60       black_allocation_(false),
61       have_code_to_deoptimize_(false),
62       marking_deque_(heap),
63       code_flusher_(nullptr),
64       sweeper_(heap) {
65 }
66 
67 #ifdef VERIFY_HEAP
68 class VerifyMarkingVisitor : public ObjectVisitor {
69  public:
70   void VisitPointers(Object** start, Object** end) override {
71     for (Object** current = start; current < end; current++) {
72       if ((*current)->IsHeapObject()) {
73         HeapObject* object = HeapObject::cast(*current);
74         CHECK(ObjectMarking::IsBlackOrGrey(object));
75       }
76     }
77   }
78 
79   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
80     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
81     if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
82       Object* p = rinfo->target_object();
83       VisitPointer(&p);
84     }
85   }
86 
87   void VisitCell(RelocInfo* rinfo) override {
88     Code* code = rinfo->host();
89     DCHECK(rinfo->rmode() == RelocInfo::CELL);
90     if (!code->IsWeakObject(rinfo->target_cell())) {
91       ObjectVisitor::VisitCell(rinfo);
92     }
93   }
94 };
95 
96 
97 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
98   VerifyMarkingVisitor visitor;
99   HeapObject* object;
100   Address next_object_must_be_here_or_later = bottom;
101   for (Address current = bottom; current < top;) {
102     object = HeapObject::FromAddress(current);
103     // One word fillers at the end of a black area can be grey.
104     if (ObjectMarking::IsBlackOrGrey(object) &&
105         object->map() != heap->one_pointer_filler_map()) {
106       CHECK(ObjectMarking::IsBlack(object));
107       CHECK(current >= next_object_must_be_here_or_later);
108       object->Iterate(&visitor);
109       next_object_must_be_here_or_later = current + object->Size();
110       // The object is either part of a black area of black allocation or a
111       // regular black object.
112       Page* page = Page::FromAddress(current);
113       CHECK(
114           page->markbits()->AllBitsSetInRange(
115               page->AddressToMarkbitIndex(current),
116               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
117           page->markbits()->AllBitsClearInRange(
118               page->AddressToMarkbitIndex(current + kPointerSize * 2),
119               page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
120       current = next_object_must_be_here_or_later;
121     } else {
122       current += kPointerSize;
123     }
124   }
125 }
126 
127 static void VerifyMarking(NewSpace* space) {
128   Address end = space->top();
129   // The bottom position is at the start of its page. This allows us to use
130   // page->area_start() as start of range on all pages.
131   CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
132 
133   PageRange range(space->bottom(), end);
134   for (auto it = range.begin(); it != range.end();) {
135     Page* page = *(it++);
136     Address limit = it != range.end() ? page->area_end() : end;
137     CHECK(limit == end || !page->Contains(end));
138     VerifyMarking(space->heap(), page->area_start(), limit);
139   }
140 }
141 
142 
143 static void VerifyMarking(PagedSpace* space) {
144   for (Page* p : *space) {
145     VerifyMarking(space->heap(), p->area_start(), p->area_end());
146   }
147 }
148 
149 
150 static void VerifyMarking(Heap* heap) {
151   VerifyMarking(heap->old_space());
152   VerifyMarking(heap->code_space());
153   VerifyMarking(heap->map_space());
154   VerifyMarking(heap->new_space());
155 
156   VerifyMarkingVisitor visitor;
157 
158   LargeObjectIterator it(heap->lo_space());
159   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
160     if (ObjectMarking::IsBlackOrGrey(obj)) {
161       obj->Iterate(&visitor);
162     }
163   }
164 
165   heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
166 }
167 
168 
169 class VerifyEvacuationVisitor : public ObjectVisitor {
170  public:
171   void VisitPointers(Object** start, Object** end) override {
172     for (Object** current = start; current < end; current++) {
173       if ((*current)->IsHeapObject()) {
174         HeapObject* object = HeapObject::cast(*current);
175         CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
176       }
177     }
178   }
179 };
180 
181 
182 static void VerifyEvacuation(Page* page) {
183   VerifyEvacuationVisitor visitor;
184   HeapObjectIterator iterator(page);
185   for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
186        heap_object = iterator.Next()) {
187     // We skip free space objects.
188     if (!heap_object->IsFiller()) {
189       heap_object->Iterate(&visitor);
190     }
191   }
192 }
193 
194 
195 static void VerifyEvacuation(NewSpace* space) {
196   VerifyEvacuationVisitor visitor;
197   PageRange range(space->bottom(), space->top());
198   for (auto it = range.begin(); it != range.end();) {
199     Page* page = *(it++);
200     Address current = page->area_start();
201     Address limit = it != range.end() ? page->area_end() : space->top();
202     CHECK(limit == space->top() || !page->Contains(space->top()));
203     while (current < limit) {
204       HeapObject* object = HeapObject::FromAddress(current);
205       object->Iterate(&visitor);
206       current += object->Size();
207     }
208   }
209 }
210 
211 
212 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
213   if (FLAG_use_allocation_folding && (space == heap->old_space())) {
214     return;
215   }
216   for (Page* p : *space) {
217     if (p->IsEvacuationCandidate()) continue;
218     VerifyEvacuation(p);
219   }
220 }
221 
222 
223 static void VerifyEvacuation(Heap* heap) {
224   VerifyEvacuation(heap, heap->old_space());
225   VerifyEvacuation(heap, heap->code_space());
226   VerifyEvacuation(heap, heap->map_space());
227   VerifyEvacuation(heap->new_space());
228 
229   VerifyEvacuationVisitor visitor;
230   heap->IterateStrongRoots(&visitor, VISIT_ALL);
231 }
232 #endif  // VERIFY_HEAP
233 
234 
235 void MarkCompactCollector::SetUp() {
236   DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
237   DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
238   DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
239   DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
240   marking_deque()->SetUp();
241 
242   if (FLAG_flush_code) {
243     code_flusher_ = new CodeFlusher(isolate());
244     if (FLAG_trace_code_flushing) {
245       PrintF("[code-flushing is now on]\n");
246     }
247   }
248 }
249 
250 
251 void MarkCompactCollector::TearDown() {
252   AbortCompaction();
253   marking_deque()->TearDown();
254   delete code_flusher_;
255 }
256 
257 
258 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
259   DCHECK(!p->NeverEvacuate());
260   p->MarkEvacuationCandidate();
261   evacuation_candidates_.Add(p);
262 }
263 
264 
265 static void TraceFragmentation(PagedSpace* space) {
266   int number_of_pages = space->CountTotalPages();
267   intptr_t reserved = (number_of_pages * space->AreaSize());
268   intptr_t free = reserved - space->SizeOfObjects();
269   PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
270          AllocationSpaceName(space->identity()), number_of_pages,
271          static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
272 }
273 
274 bool MarkCompactCollector::StartCompaction() {
275   if (!compacting_) {
276     DCHECK(evacuation_candidates_.length() == 0);
277 
278     CollectEvacuationCandidates(heap()->old_space());
279 
280     if (FLAG_compact_code_space) {
281       CollectEvacuationCandidates(heap()->code_space());
282     } else if (FLAG_trace_fragmentation) {
283       TraceFragmentation(heap()->code_space());
284     }
285 
286     if (FLAG_trace_fragmentation) {
287       TraceFragmentation(heap()->map_space());
288     }
289 
290     compacting_ = evacuation_candidates_.length() > 0;
291   }
292 
293   return compacting_;
294 }
295 
296 void MarkCompactCollector::CollectGarbage() {
297   // Make sure that Prepare() has been called. The individual steps below will
298   // update the state as they proceed.
299   DCHECK(state_ == PREPARE_GC);
300 
301   MarkLiveObjects();
302 
303   DCHECK(heap_->incremental_marking()->IsStopped());
304 
305   ClearNonLiveReferences();
306 
307   RecordObjectStats();
308 
309 #ifdef VERIFY_HEAP
310   if (FLAG_verify_heap) {
311     VerifyMarking(heap_);
312   }
313 #endif
314 
315   StartSweepSpaces();
316 
317   EvacuateNewSpaceAndCandidates();
318 
319   Finish();
320 }
321 
322 #ifdef VERIFY_HEAP
323 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
324   for (Page* p : *space) {
325     CHECK(p->markbits()->IsClean());
326     CHECK_EQ(0, p->LiveBytes());
327   }
328 }
329 
330 
331 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
332   for (Page* p : PageRange(space->bottom(), space->top())) {
333     CHECK(p->markbits()->IsClean());
334     CHECK_EQ(0, p->LiveBytes());
335   }
336 }
337 
338 
339 void MarkCompactCollector::VerifyMarkbitsAreClean() {
340   VerifyMarkbitsAreClean(heap_->old_space());
341   VerifyMarkbitsAreClean(heap_->code_space());
342   VerifyMarkbitsAreClean(heap_->map_space());
343   VerifyMarkbitsAreClean(heap_->new_space());
344 
345   LargeObjectIterator it(heap_->lo_space());
346   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
347     CHECK(ObjectMarking::IsWhite(obj));
348     CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
349   }
350 }
351 
352 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
353   HeapObjectIterator code_iterator(heap()->code_space());
354   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
355        obj = code_iterator.Next()) {
356     Code* code = Code::cast(obj);
357     if (!code->is_optimized_code()) continue;
358     if (WillBeDeoptimized(code)) continue;
359     code->VerifyEmbeddedObjectsDependency();
360   }
361 }
362 
363 
364 void MarkCompactCollector::VerifyOmittedMapChecks() {
365   HeapObjectIterator iterator(heap()->map_space());
366   for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
367     Map* map = Map::cast(obj);
368     map->VerifyOmittedMapChecks();
369   }
370 }
371 #endif  // VERIFY_HEAP
372 
373 
374 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
375   for (Page* p : *space) {
376     p->ClearLiveness();
377   }
378 }
379 
380 
381 static void ClearMarkbitsInNewSpace(NewSpace* space) {
382   for (Page* page : *space) {
383     page->ClearLiveness();
384   }
385 }
386 
387 
388 void MarkCompactCollector::ClearMarkbits() {
389   ClearMarkbitsInPagedSpace(heap_->code_space());
390   ClearMarkbitsInPagedSpace(heap_->map_space());
391   ClearMarkbitsInPagedSpace(heap_->old_space());
392   ClearMarkbitsInNewSpace(heap_->new_space());
393 
394   LargeObjectIterator it(heap_->lo_space());
395   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
396     ObjectMarking::ClearMarkBit(obj);
397     MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
398     chunk->ResetProgressBar();
399     chunk->ResetLiveBytes();
400   }
401 }
402 
403 class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
404  public:
405   SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
406               AllocationSpace space_to_start)
407       : sweeper_(sweeper),
408         pending_sweeper_tasks_(pending_sweeper_tasks),
409         space_to_start_(space_to_start) {}
410 
411   virtual ~SweeperTask() {}
412 
413  private:
414   // v8::Task overrides.
415   void Run() override {
416     DCHECK_GE(space_to_start_, FIRST_SPACE);
417     DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
418     const int offset = space_to_start_ - FIRST_SPACE;
419     const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
420     for (int i = 0; i < num_spaces; i++) {
421       const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
422       DCHECK_GE(space_id, FIRST_SPACE);
423       DCHECK_LE(space_id, LAST_PAGED_SPACE);
424       sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
425     }
426     pending_sweeper_tasks_->Signal();
427   }
428 
429   Sweeper* sweeper_;
430   base::Semaphore* pending_sweeper_tasks_;
431   AllocationSpace space_to_start_;
432 
433   DISALLOW_COPY_AND_ASSIGN(SweeperTask);
434 };
435 
436 void MarkCompactCollector::Sweeper::StartSweeping() {
437   sweeping_in_progress_ = true;
438   ForAllSweepingSpaces([this](AllocationSpace space) {
439     std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
440               [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
441   });
442 }
443 
444 void MarkCompactCollector::Sweeper::StartSweeperTasks() {
445   if (FLAG_concurrent_sweeping && sweeping_in_progress_) {
446     ForAllSweepingSpaces([this](AllocationSpace space) {
447       if (space == NEW_SPACE) return;
448       num_sweeping_tasks_.Increment(1);
449       V8::GetCurrentPlatform()->CallOnBackgroundThread(
450           new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space),
451           v8::Platform::kShortRunningTask);
452     });
453   }
454 }
455 
456 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
457     Page* page) {
458   if (!page->SweepingDone()) {
459     ParallelSweepPage(page, page->owner()->identity());
460     if (!page->SweepingDone()) {
461       // We were not able to sweep that page, i.e., a concurrent
462       // sweeper thread currently owns this page. Wait for the sweeper
463       // thread to be done with this page.
464       page->WaitUntilSweepingCompleted();
465     }
466   }
467 }
468 
469 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
470   if (FLAG_concurrent_sweeping &&
471       !sweeper().IsSweepingCompleted(space->identity())) {
472     sweeper().ParallelSweepSpace(space->identity(), 0);
473     space->RefillFreeList();
474   }
475 }
476 
477 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
478   base::LockGuard<base::Mutex> guard(&mutex_);
479   SweptList& list = swept_list_[space->identity()];
480   if (list.length() > 0) {
481     return list.RemoveLast();
482   }
483   return nullptr;
484 }
485 
486 void MarkCompactCollector::Sweeper::EnsureCompleted() {
487   if (!sweeping_in_progress_) return;
488 
489   // If sweeping is not completed or not running at all, we try to complete it
490   // here.
491   ForAllSweepingSpaces([this](AllocationSpace space) {
492     if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) {
493       ParallelSweepSpace(space, 0);
494     }
495   });
496 
497   if (FLAG_concurrent_sweeping) {
498     while (num_sweeping_tasks_.Value() > 0) {
499       pending_sweeper_tasks_semaphore_.Wait();
500       num_sweeping_tasks_.Increment(-1);
501     }
502   }
503 
504   ForAllSweepingSpaces([this](AllocationSpace space) {
505     if (space == NEW_SPACE) {
506       swept_list_[NEW_SPACE].Clear();
507     }
508     DCHECK(sweeping_list_[space].empty());
509   });
510   sweeping_in_progress_ = false;
511 }
512 
513 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
514   if (!sweeping_in_progress_) return;
515   if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) {
516     for (Page* p : *heap_->new_space()) {
517       SweepOrWaitUntilSweepingCompleted(p);
518     }
519   }
520 }
521 
522 void MarkCompactCollector::EnsureSweepingCompleted() {
523   if (!sweeper().sweeping_in_progress()) return;
524 
525   sweeper().EnsureCompleted();
526   heap()->old_space()->RefillFreeList();
527   heap()->code_space()->RefillFreeList();
528   heap()->map_space()->RefillFreeList();
529 
530 #ifdef VERIFY_HEAP
531   if (FLAG_verify_heap && !evacuation()) {
532     VerifyEvacuation(heap_);
533   }
534 #endif
535 }
536 
537 bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() {
538   DCHECK(FLAG_concurrent_sweeping);
539   while (pending_sweeper_tasks_semaphore_.WaitFor(
540       base::TimeDelta::FromSeconds(0))) {
541     num_sweeping_tasks_.Increment(-1);
542   }
543   return num_sweeping_tasks_.Value() != 0;
544 }
545 
546 bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) {
547   DCHECK(FLAG_concurrent_sweeping);
548   if (AreSweeperTasksRunning()) return false;
549   base::LockGuard<base::Mutex> guard(&mutex_);
550   return sweeping_list_[space].empty();
551 }
552 
553 const char* AllocationSpaceName(AllocationSpace space) {
554   switch (space) {
555     case NEW_SPACE:
556       return "NEW_SPACE";
557     case OLD_SPACE:
558       return "OLD_SPACE";
559     case CODE_SPACE:
560       return "CODE_SPACE";
561     case MAP_SPACE:
562       return "MAP_SPACE";
563     case LO_SPACE:
564       return "LO_SPACE";
565     default:
566       UNREACHABLE();
567   }
568 
569   return NULL;
570 }
571 
572 void MarkCompactCollector::ComputeEvacuationHeuristics(
573     size_t area_size, int* target_fragmentation_percent,
574     size_t* max_evacuated_bytes) {
575   // For the reduce-memory and optimize-for-memory modes we directly define
576   // both constants.
577   const int kTargetFragmentationPercentForReduceMemory = 20;
578   const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
579   const int kTargetFragmentationPercentForOptimizeMemory = 20;
580   const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
581 
582   // For regular mode (which is latency critical) we define less aggressive
583   // defaults to start and switch to a trace-based (using compaction speed)
584   // approach as soon as we have enough samples.
585   const int kTargetFragmentationPercent = 70;
586   const size_t kMaxEvacuatedBytes = 4 * MB;
587   // Time to take for a single area (=payload of page). Used as soon as there
588   // exist enough compaction speed samples.
589   const float kTargetMsPerArea = .5;
590 
591   if (heap()->ShouldReduceMemory()) {
592     *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
593     *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
594   } else if (heap()->ShouldOptimizeForMemoryUsage()) {
595     *target_fragmentation_percent =
596         kTargetFragmentationPercentForOptimizeMemory;
597     *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
598   } else {
599     const double estimated_compaction_speed =
600         heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
601     if (estimated_compaction_speed != 0) {
602       // Estimate the target fragmentation based on traced compaction speed
603       // and a goal for a single page.
604       const double estimated_ms_per_area =
605           1 + area_size / estimated_compaction_speed;
606       *target_fragmentation_percent = static_cast<int>(
607           100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
608       if (*target_fragmentation_percent <
609           kTargetFragmentationPercentForReduceMemory) {
610         *target_fragmentation_percent =
611             kTargetFragmentationPercentForReduceMemory;
612       }
613     } else {
614       *target_fragmentation_percent = kTargetFragmentationPercent;
615     }
616     *max_evacuated_bytes = kMaxEvacuatedBytes;
617   }
618 }
619 
620 
621 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
622   DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
623 
624   int number_of_pages = space->CountTotalPages();
625   size_t area_size = space->AreaSize();
626 
627   // Pairs of (live_bytes_in_page, page).
628   typedef std::pair<size_t, Page*> LiveBytesPagePair;
629   std::vector<LiveBytesPagePair> pages;
630   pages.reserve(number_of_pages);
631 
632   DCHECK(!sweeping_in_progress());
633   DCHECK(!FLAG_concurrent_sweeping ||
634          sweeper().IsSweepingCompleted(space->identity()));
635   Page* owner_of_linear_allocation_area =
636       space->top() == space->limit()
637           ? nullptr
638           : Page::FromAllocationAreaAddress(space->top());
639   for (Page* p : *space) {
640     if (p->NeverEvacuate() || p == owner_of_linear_allocation_area) continue;
641     // Invariant: Evacuation candidates are only created when marking is
642     // started. This means that sweeping has finished. Furthermore, at the end
643     // of a GC all evacuation candidates are cleared and their slot buffers are
644     // released.
645     CHECK(!p->IsEvacuationCandidate());
646     CHECK_NULL(p->old_to_old_slots());
647     CHECK_NULL(p->typed_old_to_old_slots());
648     CHECK(p->SweepingDone());
649     DCHECK(p->area_size() == area_size);
650     pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
651   }
652 
653   int candidate_count = 0;
654   size_t total_live_bytes = 0;
655 
656   const bool reduce_memory = heap()->ShouldReduceMemory();
657   if (FLAG_manual_evacuation_candidates_selection) {
658     for (size_t i = 0; i < pages.size(); i++) {
659       Page* p = pages[i].second;
660       if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
661         candidate_count++;
662         total_live_bytes += pages[i].first;
663         p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
664         AddEvacuationCandidate(p);
665       }
666     }
667   } else if (FLAG_stress_compaction) {
668     for (size_t i = 0; i < pages.size(); i++) {
669       Page* p = pages[i].second;
670       if (i % 2 == 0) {
671         candidate_count++;
672         total_live_bytes += pages[i].first;
673         AddEvacuationCandidate(p);
674       }
675     }
676   } else {
677     // The following approach determines the pages that should be evacuated.
678     //
679     // We use two conditions to decide whether a page qualifies as an evacuation
680     // candidate, or not:
681     // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
682     //   between live bytes and the capacity of this page (= area).
683     // * Evacuation quota: A global quota determining how much bytes should be
684     //   compacted.
685     //
686     // The algorithm sorts all pages by live bytes and then iterates through
687     // them starting with the page with the most free memory, adding them to the
688     // set of evacuation candidates as long as both conditions (fragmentation
689     // and quota) hold.
690     size_t max_evacuated_bytes;
691     int target_fragmentation_percent;
692     ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
693                                 &max_evacuated_bytes);
694 
695     const size_t free_bytes_threshold =
696         target_fragmentation_percent * (area_size / 100);
697 
698     // Sort pages from the most free to the least free, then select
699     // the first n pages for evacuation such that:
700     // - the total size of evacuated objects does not exceed the specified
701     // limit.
702     // - fragmentation of (n+1)-th page does not exceed the specified limit.
703     std::sort(pages.begin(), pages.end(),
704               [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
705                 return a.first < b.first;
706               });
707     for (size_t i = 0; i < pages.size(); i++) {
708       size_t live_bytes = pages[i].first;
709       DCHECK_GE(area_size, live_bytes);
710       size_t free_bytes = area_size - live_bytes;
711       if (FLAG_always_compact ||
712           ((free_bytes >= free_bytes_threshold) &&
713            ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
714         candidate_count++;
715         total_live_bytes += live_bytes;
716       }
717       if (FLAG_trace_fragmentation_verbose) {
718         PrintIsolate(isolate(),
719                      "compaction-selection-page: space=%s free_bytes_page=%zu "
720                      "fragmentation_limit_kb=%" PRIuS
721                      " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
722                      "compaction_limit_kb=%zu\n",
723                      AllocationSpaceName(space->identity()), free_bytes / KB,
724                      free_bytes_threshold / KB, target_fragmentation_percent,
725                      total_live_bytes / KB, max_evacuated_bytes / KB);
726       }
727     }
728     // How many pages we will allocate for the evacuated objects
729     // in the worst case: ceil(total_live_bytes / area_size)
730     int estimated_new_pages =
731         static_cast<int>((total_live_bytes + area_size - 1) / area_size);
732     DCHECK_LE(estimated_new_pages, candidate_count);
733     int estimated_released_pages = candidate_count - estimated_new_pages;
734     // Avoid (compact -> expand) cycles.
735     if ((estimated_released_pages == 0) && !FLAG_always_compact) {
736       candidate_count = 0;
737     }
738     for (int i = 0; i < candidate_count; i++) {
739       AddEvacuationCandidate(pages[i].second);
740     }
741   }
742 
743   if (FLAG_trace_fragmentation) {
744     PrintIsolate(isolate(),
745                  "compaction-selection: space=%s reduce_memory=%d pages=%d "
746                  "total_live_bytes=%zu\n",
747                  AllocationSpaceName(space->identity()), reduce_memory,
748                  candidate_count, total_live_bytes / KB);
749   }
750 }
751 
752 
753 void MarkCompactCollector::AbortCompaction() {
754   if (compacting_) {
755     RememberedSet<OLD_TO_OLD>::ClearAll(heap());
756     for (Page* p : evacuation_candidates_) {
757       p->ClearEvacuationCandidate();
758     }
759     compacting_ = false;
760     evacuation_candidates_.Rewind(0);
761   }
762   DCHECK_EQ(0, evacuation_candidates_.length());
763 }
764 
765 
766 void MarkCompactCollector::Prepare() {
767   was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
768 
769 #ifdef DEBUG
770   DCHECK(state_ == IDLE);
771   state_ = PREPARE_GC;
772 #endif
773 
774   DCHECK(!FLAG_never_compact || !FLAG_always_compact);
775 
776   // Instead of waiting we could also abort the sweeper threads here.
777   EnsureSweepingCompleted();
778 
779   if (heap()->incremental_marking()->IsSweeping()) {
780     heap()->incremental_marking()->Stop();
781   }
782 
783   // If concurrent unmapping tasks are still running, we should wait for
784   // them here.
785   heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
786 
787   // Clear marking bits if incremental marking is aborted.
788   if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
789     heap()->incremental_marking()->Stop();
790     heap()->incremental_marking()->AbortBlackAllocation();
791     ClearMarkbits();
792     AbortWeakCollections();
793     AbortWeakCells();
794     AbortTransitionArrays();
795     AbortCompaction();
796     heap_->local_embedder_heap_tracer()->AbortTracing();
797     marking_deque()->Clear();
798     was_marked_incrementally_ = false;
799   }
800 
801   if (!was_marked_incrementally_) {
802     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_PROLOGUE);
803     heap_->local_embedder_heap_tracer()->TracePrologue();
804   }
805 
806   // Don't start compaction if we are in the middle of an incremental
807   // marking cycle. We did not collect any slots.
808   if (!FLAG_never_compact && !was_marked_incrementally_) {
809     StartCompaction();
810   }
811 
812   PagedSpaces spaces(heap());
813   for (PagedSpace* space = spaces.next(); space != NULL;
814        space = spaces.next()) {
815     space->PrepareForMarkCompact();
816   }
817   heap()->account_external_memory_concurrently_freed();
818 
819 #ifdef VERIFY_HEAP
820   if (!was_marked_incrementally_ && FLAG_verify_heap) {
821     VerifyMarkbitsAreClean();
822   }
823 #endif
824 }
825 
826 
827 void MarkCompactCollector::Finish() {
828   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
829 
830   if (!heap()->delay_sweeper_tasks_for_testing_) {
831     sweeper().StartSweeperTasks();
832   }
833 
834   // The hashing of weak_object_to_code_table is no longer valid.
835   heap()->weak_object_to_code_table()->Rehash(
836       heap()->isolate()->factory()->undefined_value());
837 
838   // Clear the marking state of live large objects.
839   heap_->lo_space()->ClearMarkingStateOfLiveObjects();
840 
841 #ifdef DEBUG
842   DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
843   state_ = IDLE;
844 #endif
845   heap_->isolate()->inner_pointer_to_code_cache()->Flush();
846 
847   // The stub caches are not traversed during GC; clear them to force
848   // their lazy re-initialization. This must be done after the
849   // GC, because it relies on the new address of certain old space
850   // objects (empty string, illegal builtin).
851   isolate()->load_stub_cache()->Clear();
852   isolate()->store_stub_cache()->Clear();
853 
854   if (have_code_to_deoptimize_) {
855     // Some code objects were marked for deoptimization during the GC.
856     Deoptimizer::DeoptimizeMarkedCode(isolate());
857     have_code_to_deoptimize_ = false;
858   }
859 
860   heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
861 }
862 
863 
864 // -------------------------------------------------------------------------
865 // Phase 1: tracing and marking live objects.
866 //   before: all objects are in normal state.
867 //   after: a live object's map pointer is marked as '00'.
868 
869 // Marking all live objects in the heap as part of mark-sweep or mark-compact
870 // collection.  Before marking, all objects are in their normal state.  After
871 // marking, live objects' map pointers are marked indicating that the object
872 // has been found reachable.
873 //
874 // The marking algorithm is a (mostly) depth-first (because of possible stack
875 // overflow) traversal of the graph of objects reachable from the roots.  It
876 // uses an explicit stack of pointers rather than recursion.  The young
877 // generation's inactive ('from') space is used as a marking stack.  The
878 // objects in the marking stack are the ones that have been reached and marked
879 // but their children have not yet been visited.
880 //
881 // The marking stack can overflow during traversal.  In that case, we set an
882 // overflow flag.  When the overflow flag is set, we continue marking objects
883 // reachable from the objects on the marking stack, but no longer push them on
884 // the marking stack.  Instead, we mark them as both marked and overflowed.
885 // When the stack is in the overflowed state, objects marked as overflowed
886 // have been reached and marked but their children have not been visited yet.
887 // After emptying the marking stack, we clear the overflow flag and traverse
888 // the heap looking for objects marked as overflowed, push them on the stack,
889 // and continue with marking.  This process repeats until all reachable
890 // objects have been marked.
891 
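// The snippet below is a minimal, self-contained sketch of the
// overflow-tolerant marking traversal described in the comment above; it is
// not part of V8. Every name in it (marking_sketch::Obj, MarkingStack,
// MarkObject, ProcessMarkingStack) is a hypothetical stand-in for the real
// marking deque, mark bits and object-body visitors.
#include <cstddef>
#include <vector>

namespace marking_sketch {

struct Obj {
  bool marked = false;      // reached by the traversal
  bool overflowed = false;  // reached while the marking stack was full
  std::vector<Obj*> children;
};

class MarkingStack {
 public:
  explicit MarkingStack(size_t capacity) : capacity_(capacity) {}
  // Returns false (and records the overflow) if the stack is full.
  bool Push(Obj* o) {
    if (stack_.size() == capacity_) {
      overflowed_ = true;
      return false;
    }
    stack_.push_back(o);
    return true;
  }
  Obj* Pop() {
    Obj* o = stack_.back();
    stack_.pop_back();
    return o;
  }
  bool IsEmpty() const { return stack_.empty(); }
  bool overflowed() const { return overflowed_; }
  void clear_overflowed() { overflowed_ = false; }

 private:
  const size_t capacity_;
  bool overflowed_ = false;
  std::vector<Obj*> stack_;
};

// Marks an object and pushes it; a failed push leaves the object flagged as
// overflowed so a later heap scan can rediscover it.
inline void MarkObject(Obj* o, MarkingStack* stack) {
  if (o == nullptr || o->marked) return;
  o->marked = true;
  if (!stack->Push(o)) o->overflowed = true;
}

// Drains the stack, then repeatedly rescans the heap for overflowed objects
// until a drain completes without any overflow, as described above.
inline void ProcessMarkingStack(const std::vector<Obj*>& heap,
                                MarkingStack* stack) {
  while (true) {
    while (!stack->IsEmpty()) {
      Obj* o = stack->Pop();
      for (Obj* child : o->children) MarkObject(child, stack);
    }
    if (!stack->overflowed()) return;
    stack->clear_overflowed();
    for (Obj* o : heap) {
      if (o->overflowed) {
        o->overflowed = false;
        if (!stack->Push(o)) o->overflowed = true;  // still full; retry later
      }
    }
  }
}

}  // namespace marking_sketch
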
892 void CodeFlusher::ProcessJSFunctionCandidates() {
893   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
894   Code* interpreter_entry_trampoline =
895       isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
896   Object* undefined = isolate_->heap()->undefined_value();
897 
898   JSFunction* candidate = jsfunction_candidates_head_;
899   JSFunction* next_candidate;
900   while (candidate != NULL) {
901     next_candidate = GetNextCandidate(candidate);
902     ClearNextCandidate(candidate, undefined);
903 
904     SharedFunctionInfo* shared = candidate->shared();
905 
906     Code* code = shared->code();
907     if (ObjectMarking::IsWhite(code)) {
908       if (FLAG_trace_code_flushing && shared->is_compiled()) {
909         PrintF("[code-flushing clears: ");
910         shared->ShortPrint();
911         PrintF(" - age: %d]\n", code->GetAge());
912       }
913       // Always flush the optimized code map if there is one.
914       if (!shared->OptimizedCodeMapIsCleared()) {
915         shared->ClearOptimizedCodeMap();
916       }
917       if (shared->HasBytecodeArray()) {
918         shared->set_code(interpreter_entry_trampoline);
919         candidate->set_code(interpreter_entry_trampoline);
920       } else {
921         shared->set_code(lazy_compile);
922         candidate->set_code(lazy_compile);
923       }
924     } else {
925       DCHECK(ObjectMarking::IsBlack(code));
926       candidate->set_code(code);
927     }
928 
929     // We are in the middle of a GC cycle so the write barrier in the code
930     // setter did not record the slot update and we have to do that manually.
931     Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
932     Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
933     isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
934         candidate, slot, target);
935 
936     Object** shared_code_slot =
937         HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
938     isolate_->heap()->mark_compact_collector()->RecordSlot(
939         shared, shared_code_slot, *shared_code_slot);
940 
941     candidate = next_candidate;
942   }
943 
944   jsfunction_candidates_head_ = NULL;
945 }
946 
947 
948 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
949   Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
950   Code* interpreter_entry_trampoline =
951       isolate_->builtins()->builtin(Builtins::kInterpreterEntryTrampoline);
952   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
953   SharedFunctionInfo* next_candidate;
954   while (candidate != NULL) {
955     next_candidate = GetNextCandidate(candidate);
956     ClearNextCandidate(candidate);
957 
958     Code* code = candidate->code();
959     if (ObjectMarking::IsWhite(code)) {
960       if (FLAG_trace_code_flushing && candidate->is_compiled()) {
961         PrintF("[code-flushing clears: ");
962         candidate->ShortPrint();
963         PrintF(" - age: %d]\n", code->GetAge());
964       }
965       // Always flush the optimized code map if there is one.
966       if (!candidate->OptimizedCodeMapIsCleared()) {
967         candidate->ClearOptimizedCodeMap();
968       }
969       if (candidate->HasBytecodeArray()) {
970         candidate->set_code(interpreter_entry_trampoline);
971       } else {
972         candidate->set_code(lazy_compile);
973       }
974     }
975 
976     Object** code_slot =
977         HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
978     isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
979                                                            *code_slot);
980 
981     candidate = next_candidate;
982   }
983 
984   shared_function_info_candidates_head_ = NULL;
985 }
986 
987 
988 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
989   // Make sure previous flushing decisions are revisited.
990   isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
991 
992   if (FLAG_trace_code_flushing) {
993     PrintF("[code-flushing abandons function-info: ");
994     shared_info->ShortPrint();
995     PrintF("]\n");
996   }
997 
998   SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
999   SharedFunctionInfo* next_candidate;
1000   if (candidate == shared_info) {
1001     next_candidate = GetNextCandidate(shared_info);
1002     shared_function_info_candidates_head_ = next_candidate;
1003     ClearNextCandidate(shared_info);
1004   } else {
1005     while (candidate != NULL) {
1006       next_candidate = GetNextCandidate(candidate);
1007 
1008       if (next_candidate == shared_info) {
1009         next_candidate = GetNextCandidate(shared_info);
1010         SetNextCandidate(candidate, next_candidate);
1011         ClearNextCandidate(shared_info);
1012         break;
1013       }
1014 
1015       candidate = next_candidate;
1016     }
1017   }
1018 }
1019 
1020 
1021 void CodeFlusher::EvictCandidate(JSFunction* function) {
1022   DCHECK(!function->next_function_link()->IsUndefined(isolate_));
1023   Object* undefined = isolate_->heap()->undefined_value();
1024 
1025   // Make sure previous flushing decisions are revisited.
1026   isolate_->heap()->incremental_marking()->IterateBlackObject(function);
1027   isolate_->heap()->incremental_marking()->IterateBlackObject(
1028       function->shared());
1029 
1030   if (FLAG_trace_code_flushing) {
1031     PrintF("[code-flushing abandons closure: ");
1032     function->shared()->ShortPrint();
1033     PrintF("]\n");
1034   }
1035 
1036   JSFunction* candidate = jsfunction_candidates_head_;
1037   JSFunction* next_candidate;
1038   if (candidate == function) {
1039     next_candidate = GetNextCandidate(function);
1040     jsfunction_candidates_head_ = next_candidate;
1041     ClearNextCandidate(function, undefined);
1042   } else {
1043     while (candidate != NULL) {
1044       next_candidate = GetNextCandidate(candidate);
1045 
1046       if (next_candidate == function) {
1047         next_candidate = GetNextCandidate(function);
1048         SetNextCandidate(candidate, next_candidate);
1049         ClearNextCandidate(function, undefined);
1050         break;
1051       }
1052 
1053       candidate = next_candidate;
1054     }
1055   }
1056 }
1057 
1058 
1059 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1060   Heap* heap = isolate_->heap();
1061 
1062   JSFunction** slot = &jsfunction_candidates_head_;
1063   JSFunction* candidate = jsfunction_candidates_head_;
1064   while (candidate != NULL) {
1065     if (heap->InFromSpace(candidate)) {
1066       v->VisitPointer(reinterpret_cast<Object**>(slot));
1067     }
1068     candidate = GetNextCandidate(*slot);
1069     slot = GetNextCandidateSlot(*slot);
1070   }
1071 }
1072 
1073 class StaticYoungGenerationMarkingVisitor
1074     : public StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor> {
1075  public:
1076   static void Initialize(Heap* heap) {
1077     StaticNewSpaceVisitor<StaticYoungGenerationMarkingVisitor>::Initialize();
1078   }
1079 
1080   inline static void VisitPointer(Heap* heap, HeapObject* object, Object** p) {
1081     Object* target = *p;
1082     if (heap->InNewSpace(target)) {
1083       if (MarkRecursively(heap, HeapObject::cast(target))) return;
1084       heap->mark_compact_collector()->MarkObject(HeapObject::cast(target));
1085     }
1086   }
1087 
1088  protected:
1089   inline static bool MarkRecursively(Heap* heap, HeapObject* object) {
1090     StackLimitCheck check(heap->isolate());
1091     if (check.HasOverflowed()) return false;
1092 
1093     if (ObjectMarking::IsBlackOrGrey(object)) return true;
1094     ObjectMarking::WhiteToBlack(object);
1095     IterateBody(object->map(), object);
1096     return true;
1097   }
1098 };
1099 
1100 class MarkCompactMarkingVisitor
1101     : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1102  public:
1103   static void Initialize();
1104 
1105   INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1106     MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1107   }
1108 
1109   INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1110                                    Object** start, Object** end)) {
1111     // Mark all objects pointed to in [start, end).
1112     const int kMinRangeForMarkingRecursion = 64;
1113     if (end - start >= kMinRangeForMarkingRecursion) {
1114       if (VisitUnmarkedObjects(heap, object, start, end)) return;
1115       // We are close to a stack overflow, so just mark the objects.
1116     }
1117     MarkCompactCollector* collector = heap->mark_compact_collector();
1118     for (Object** p = start; p < end; p++) {
1119       MarkObjectByPointer(collector, object, p);
1120     }
1121   }
1122 
1123   // Marks the object black and pushes it on the marking stack.
1124   INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1125     heap->mark_compact_collector()->MarkObject(object);
1126   }
1127 
1128   // Marks the object black without pushing it on the marking stack.
1129   // Returns true if object needed marking and false otherwise.
1130   INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1131     if (ObjectMarking::IsWhite(object)) {
1132       ObjectMarking::WhiteToBlack(object);
1133       return true;
1134     }
1135     return false;
1136   }
1137 
1138   // Mark object pointed to by p.
1139   INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1140                                          HeapObject* object, Object** p)) {
1141     if (!(*p)->IsHeapObject()) return;
1142     HeapObject* target_object = HeapObject::cast(*p);
1143     collector->RecordSlot(object, p, target_object);
1144     collector->MarkObject(target_object);
1145   }
1146 
1147 
1148   // Visit an unmarked object.
1149   INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1150                                          HeapObject* obj)) {
1151 #ifdef DEBUG
1152     DCHECK(collector->heap()->Contains(obj));
1153     DCHECK(ObjectMarking::IsWhite(obj));
1154 #endif
1155     Map* map = obj->map();
1156     Heap* heap = obj->GetHeap();
1157     ObjectMarking::WhiteToBlack(obj);
1158     // Mark the map pointer and the body.
1159     heap->mark_compact_collector()->MarkObject(map);
1160     IterateBody(map, obj);
1161   }
1162 
1163   // Visit all unmarked objects pointed to by [start, end).
1164   // Returns false if the operation fails (lack of stack space).
1165   INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1166                                           Object** start, Object** end)) {
1167     // Return false if we are close to the stack limit.
1168     StackLimitCheck check(heap->isolate());
1169     if (check.HasOverflowed()) return false;
1170 
1171     MarkCompactCollector* collector = heap->mark_compact_collector();
1172     // Visit the unmarked objects.
1173     for (Object** p = start; p < end; p++) {
1174       Object* o = *p;
1175       if (!o->IsHeapObject()) continue;
1176       collector->RecordSlot(object, p, o);
1177       HeapObject* obj = HeapObject::cast(o);
1178       if (ObjectMarking::IsBlackOrGrey(obj)) continue;
1179       VisitUnmarkedObject(collector, obj);
1180     }
1181     return true;
1182   }
1183 
1184  private:
1185   // Code flushing support.
1186 
1187   static const int kRegExpCodeThreshold = 5;
1188 
1189   static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1190                                           bool is_one_byte) {
1191     // Make sure that the fixed array is in fact initialized on the RegExp.
1192     // We could potentially trigger a GC when initializing the RegExp.
1193     if (HeapObject::cast(re->data())->map()->instance_type() !=
1194         FIXED_ARRAY_TYPE)
1195       return;
1196 
1197     // Make sure this is a RegExp that actually contains code.
1198     if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1199 
1200     Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1201     if (!code->IsSmi() &&
1202         HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1203       // Save a copy that can be reinstated if we need the code again.
1204       re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1205 
1206       // Saving a copy might create a pointer into compaction candidate
1207       // that was not observed by marker.  This might happen if JSRegExp data
1208       // was marked through the compilation cache before marker reached JSRegExp
1209       // object.
1210       FixedArray* data = FixedArray::cast(re->data());
1211       if (ObjectMarking::IsBlackOrGrey(data)) {
1212         Object** slot =
1213             data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1214         heap->mark_compact_collector()->RecordSlot(data, slot, code);
1215       }
1216 
1217       // Set a number in the 0-255 range to guarantee no smi overflow.
1218       re->SetDataAt(JSRegExp::code_index(is_one_byte),
1219                     Smi::FromInt(heap->ms_count() & 0xff));
1220     } else if (code->IsSmi()) {
1221       int value = Smi::cast(code)->value();
1222       // The regexp has not been compiled yet or there was a compilation error.
1223       if (value == JSRegExp::kUninitializedValue ||
1224           value == JSRegExp::kCompilationErrorValue) {
1225         return;
1226       }
1227 
1228       // Check if we should flush now.
1229       if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1230         re->SetDataAt(JSRegExp::code_index(is_one_byte),
1231                       Smi::FromInt(JSRegExp::kUninitializedValue));
1232         re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1233                       Smi::FromInt(JSRegExp::kUninitializedValue));
1234       }
1235     }
1236   }
1237 
1238 
1239   // Works by setting the current sweep_generation (as a smi) in the
1240   // code object place in the data array of the RegExp and keeps a copy
1241   // around that can be reinstated if we reuse the RegExp before flushing.
1242   // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1243   // we flush the code.
1244   static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1245     Heap* heap = map->GetHeap();
1246     MarkCompactCollector* collector = heap->mark_compact_collector();
1247     if (!collector->is_code_flushing_enabled()) {
1248       JSObjectVisitor::Visit(map, object);
1249       return;
1250     }
1251     JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1252     // Flush code or set age on both one byte and two byte code.
1253     UpdateRegExpCodeAgeAndFlush(heap, re, true);
1254     UpdateRegExpCodeAgeAndFlush(heap, re, false);
1255     // Visit the fields of the RegExp, including the updated FixedArray.
1256     JSObjectVisitor::Visit(map, object);
1257   }
1258 };
1259 
1260 
1261 void MarkCompactMarkingVisitor::Initialize() {
1262   StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1263 
1264   table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1265 }
1266 
1267 
1268 class CodeMarkingVisitor : public ThreadVisitor {
1269  public:
1270   explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1271       : collector_(collector) {}
1272 
1273   void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1274     collector_->PrepareThreadForCodeFlushing(isolate, top);
1275   }
1276 
1277  private:
1278   MarkCompactCollector* collector_;
1279 };
1280 
1281 
1282 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1283  public:
1284   explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1285       : collector_(collector) {}
1286 
1287   void VisitPointers(Object** start, Object** end) override {
1288     for (Object** p = start; p < end; p++) VisitPointer(p);
1289   }
1290 
1291   void VisitPointer(Object** slot) override {
1292     Object* obj = *slot;
1293     if (obj->IsSharedFunctionInfo()) {
1294       SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1295       collector_->MarkObject(shared->code());
1296       collector_->MarkObject(shared);
1297     }
1298   }
1299 
1300  private:
1301   MarkCompactCollector* collector_;
1302 };
1303 
1304 
1305 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1306                                                         ThreadLocalTop* top) {
1307   for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1308     // Note: for the frame that has a pending lazy deoptimization
1309     // StackFrame::unchecked_code will return a non-optimized code object for
1310     // the outermost function and StackFrame::LookupCode will return
1311     // actual optimized code object.
1312     StackFrame* frame = it.frame();
1313     Code* code = frame->unchecked_code();
1314     MarkObject(code);
1315     if (frame->is_optimized()) {
1316       Code* optimized_code = frame->LookupCode();
1317       MarkObject(optimized_code);
1318     }
1319   }
1320 }
1321 
1322 
1323 void MarkCompactCollector::PrepareForCodeFlushing() {
1324   // If code flushing is disabled, there is no need to prepare for it.
1325   if (!is_code_flushing_enabled()) return;
1326 
1327   // Make sure we are not referencing the code from the stack.
1328   DCHECK(this == heap()->mark_compact_collector());
1329   PrepareThreadForCodeFlushing(heap()->isolate(),
1330                                heap()->isolate()->thread_local_top());
1331 
1332   // Iterate the archived stacks in all threads to check if
1333   // the code is referenced.
1334   CodeMarkingVisitor code_marking_visitor(this);
1335   heap()->isolate()->thread_manager()->IterateArchivedThreads(
1336       &code_marking_visitor);
1337 
1338   SharedFunctionInfoMarkingVisitor visitor(this);
1339   heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1340   heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1341 
1342   ProcessMarkingDeque<MarkCompactMode::FULL>();
1343 }
1344 
1345 
1346 // Visitor class for marking heap roots.
1347 template <MarkCompactMode mode>
1348 class RootMarkingVisitor : public ObjectVisitor {
1349  public:
1350   explicit RootMarkingVisitor(Heap* heap)
1351       : collector_(heap->mark_compact_collector()) {}
1352 
1353   void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1354 
1355   void VisitPointers(Object** start, Object** end) override {
1356     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1357   }
1358 
1359   // Skip the weak next code link in a code object, which is visited in
1360   // ProcessTopOptimizedFrame.
1361   void VisitNextCodeLink(Object** p) override {}
1362 
1363  private:
1364   void MarkObjectByPointer(Object** p) {
1365     if (!(*p)->IsHeapObject()) return;
1366 
1367     HeapObject* object = HeapObject::cast(*p);
1368 
1369     if (mode == MarkCompactMode::YOUNG_GENERATION &&
1370         !collector_->heap()->InNewSpace(object))
1371       return;
1372 
1373     if (ObjectMarking::IsBlackOrGrey(object)) return;
1374 
1375     Map* map = object->map();
1376     // Mark the object.
1377     ObjectMarking::WhiteToBlack(object);
1378 
1379     switch (mode) {
1380       case MarkCompactMode::FULL: {
1381         // Mark the map pointer and body, and push them on the marking stack.
1382         collector_->MarkObject(map);
1383         MarkCompactMarkingVisitor::IterateBody(map, object);
1384       } break;
1385       case MarkCompactMode::YOUNG_GENERATION:
1386         StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
1387         break;
1388     }
1389 
1390     // Mark all the objects reachable from the map and body.  May leave
1391     // overflowed objects in the heap.
1392     collector_->EmptyMarkingDeque<mode>();
1393   }
1394 
1395   MarkCompactCollector* collector_;
1396 };
1397 
1398 
1399 // Helper class for pruning the string table.
1400 template <bool finalize_external_strings, bool record_slots>
1401 class StringTableCleaner : public ObjectVisitor {
1402  public:
1403   StringTableCleaner(Heap* heap, HeapObject* table)
1404       : heap_(heap), pointers_removed_(0), table_(table) {
1405     DCHECK(!record_slots || table != nullptr);
1406   }
1407 
1408   void VisitPointers(Object** start, Object** end) override {
1409     // Visit all HeapObject pointers in [start, end).
1410     MarkCompactCollector* collector = heap_->mark_compact_collector();
1411     for (Object** p = start; p < end; p++) {
1412       Object* o = *p;
1413       if (o->IsHeapObject()) {
1414         if (ObjectMarking::IsWhite(HeapObject::cast(o))) {
1415           if (finalize_external_strings) {
1416             if (o->IsExternalString()) {
1417               heap_->FinalizeExternalString(String::cast(*p));
1418             } else {
1419               // The original external string may have been internalized.
1420               DCHECK(o->IsThinString());
1421             }
1422           } else {
1423             pointers_removed_++;
1424           }
1425           // Set the entry to the_hole_value (as deleted).
1426           *p = heap_->the_hole_value();
1427         } else if (record_slots) {
1428           // StringTable contains only old space strings.
1429           DCHECK(!heap_->InNewSpace(o));
1430           collector->RecordSlot(table_, p, o);
1431         }
1432       }
1433     }
1434   }
1435 
1436   int PointersRemoved() {
1437     DCHECK(!finalize_external_strings);
1438     return pointers_removed_;
1439   }
1440 
1441  private:
1442   Heap* heap_;
1443   int pointers_removed_;
1444   HeapObject* table_;
1445 };
1446 
1447 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1448 typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
1449 
1450 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1451 // are retained.
1452 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1453  public:
1454   virtual Object* RetainAs(Object* object) {
1455     DCHECK(!ObjectMarking::IsGrey(HeapObject::cast(object)));
1456     if (ObjectMarking::IsBlack(HeapObject::cast(object))) {
1457       return object;
1458     } else if (object->IsAllocationSite() &&
1459                !(AllocationSite::cast(object)->IsZombie())) {
1460       // "dead" AllocationSites need to live long enough for a traversal of new
1461       // space. These sites get a one-time reprieve.
1462       AllocationSite* site = AllocationSite::cast(object);
1463       site->MarkZombie();
1464       ObjectMarking::WhiteToBlack(site);
1465       return object;
1466     } else {
1467       return NULL;
1468     }
1469   }
1470 };
1471 
1472 
1473 // Fill the marking stack with overflowed objects returned by the given
1474 // iterator.  Stop when the marking stack is filled or the end of the space
1475 // is reached, whichever comes first.
1476 template <class T>
1477 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1478   // The caller should ensure that the marking stack is initially not full,
1479   // so that we don't waste effort pointlessly scanning for objects.
1480   DCHECK(!marking_deque()->IsFull());
1481 
1482   Map* filler_map = heap()->one_pointer_filler_map();
1483   for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1484     if ((object->map() != filler_map) && ObjectMarking::IsGrey(object)) {
1485       ObjectMarking::GreyToBlack(object);
1486       PushBlack(object);
1487       if (marking_deque()->IsFull()) return;
1488     }
1489   }
1490 }
1491 
1492 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1493   DCHECK(!marking_deque()->IsFull());
1494   LiveObjectIterator<kGreyObjects> it(p);
1495   HeapObject* object = NULL;
1496   while ((object = it.Next()) != NULL) {
1497     DCHECK(ObjectMarking::IsGrey(object));
1498     ObjectMarking::GreyToBlack(object);
1499     PushBlack(object);
1500     if (marking_deque()->IsFull()) return;
1501   }
1502 }
1503 
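// Visitor run over the body of a migrated (copied) object: it re-records
// remembered-set entries for every slot of the copy, putting pointers into new
// space into the OLD_TO_NEW set and pointers onto evacuation candidates into
// the OLD_TO_OLD set; code targets, embedded pointers and cells are recorded
// as typed slots via RecordRelocSlot.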
1504 class RecordMigratedSlotVisitor final : public ObjectVisitor {
1505  public:
1506   explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1507       : collector_(collector) {}
1508 
1509   inline void VisitPointer(Object** p) final {
1510     RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1511   }
1512 
1513   inline void VisitPointers(Object** start, Object** end) final {
1514     while (start < end) {
1515       RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1516       ++start;
1517     }
1518   }
1519 
1520   inline void VisitCodeEntry(Address code_entry_slot) final {
1521     Address code_entry = Memory::Address_at(code_entry_slot);
1522     if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1523       RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1524                                              nullptr, CODE_ENTRY_SLOT,
1525                                              code_entry_slot);
1526     }
1527   }
1528 
1529   inline void VisitCodeTarget(RelocInfo* rinfo) final {
1530     DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
1531     Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1532     Code* host = rinfo->host();
1533     // The target is always in old space, we don't have to record the slot in
1534     // the old-to-new remembered set.
1535     DCHECK(!collector_->heap()->InNewSpace(target));
1536     collector_->RecordRelocSlot(host, rinfo, target);
1537   }
1538 
1539   inline void VisitDebugTarget(RelocInfo* rinfo) final {
1540     DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1541            rinfo->IsPatchedDebugBreakSlotSequence());
1542     Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
1543     Code* host = rinfo->host();
1544     // The target is always in old space, we don't have to record the slot in
1545     // the old-to-new remembered set.
1546     DCHECK(!collector_->heap()->InNewSpace(target));
1547     collector_->RecordRelocSlot(host, rinfo, target);
1548   }
1549 
1550   inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
1551     DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1552     HeapObject* object = HeapObject::cast(rinfo->target_object());
1553     Code* host = rinfo->host();
1554     collector_->heap()->RecordWriteIntoCode(host, rinfo, object);
1555     collector_->RecordRelocSlot(host, rinfo, object);
1556   }
1557 
1558   inline void VisitCell(RelocInfo* rinfo) final {
1559     DCHECK(rinfo->rmode() == RelocInfo::CELL);
1560     Cell* cell = rinfo->target_cell();
1561     Code* host = rinfo->host();
1562     // The cell is always in old space, we don't have to record the slot in
1563     // the old-to-new remembered set.
1564     DCHECK(!collector_->heap()->InNewSpace(cell));
1565     collector_->RecordRelocSlot(host, rinfo, cell);
1566   }
1567 
1568   // Entries that will never move.
1569   inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
1570     DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
1571     Code* stub = rinfo->code_age_stub();
1572     USE(stub);
1573     DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
1574   }
1575 
1576   // Entries that are skipped for recording.
1577   inline void VisitExternalReference(RelocInfo* rinfo) final {}
1578   inline void VisitExternalReference(Address* p) final {}
1579   inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
1580   inline void VisitExternalOneByteString(
1581       v8::String::ExternalOneByteStringResource** resource) final {}
1582   inline void VisitExternalTwoByteString(
1583       v8::String::ExternalStringResource** resource) final {}
1584   inline void VisitInternalReference(RelocInfo* rinfo) final {}
1585   inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
1586 
1587  private:
1588   inline void RecordMigratedSlot(Object* value, Address slot) {
1589     if (value->IsHeapObject()) {
1590       Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1591       if (p->InNewSpace()) {
1592         RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1593       } else if (p->IsEvacuationCandidate()) {
1594         RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1595       }
1596     }
1597   }
1598 
1599   MarkCompactCollector* collector_;
1600 };
1601 
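// Abstract visitor over individual heap objects. Visit() returns false to
// signal that visiting the object failed (e.g. it could not be evacuated).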
1602 class MarkCompactCollector::HeapObjectVisitor {
1603  public:
1604   virtual ~HeapObjectVisitor() {}
1605   virtual bool Visit(HeapObject* object) = 0;
1606 };
1607 
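// Shared logic for the evacuation visitors: TryEvacuateObject() allocates a
// copy in the target (compaction) space and MigrateObject() copies the raw
// bytes, notifies the profiler if required, re-records slots of the copy via
// RecordMigratedSlotVisitor, and stores the forwarding address in the first
// word of the old object.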
1608 class MarkCompactCollector::EvacuateVisitorBase
1609     : public MarkCompactCollector::HeapObjectVisitor {
1610  protected:
1611   enum MigrationMode { kFast, kProfiled };
1612 
1613   EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1614       : heap_(heap),
1615         compaction_spaces_(compaction_spaces),
1616         profiling_(
1617             heap->isolate()->is_profiling() ||
1618             heap->isolate()->logger()->is_logging_code_events() ||
1619             heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1620 
1621   inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1622                                 HeapObject** target_object) {
1623 #ifdef VERIFY_HEAP
1624     if (AbortCompactionForTesting(object)) return false;
1625 #endif  // VERIFY_HEAP
1626     int size = object->Size();
1627     AllocationAlignment alignment = object->RequiredAlignment();
1628     AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1629     if (allocation.To(target_object)) {
1630       MigrateObject(*target_object, object, size, target_space->identity());
1631       return true;
1632     }
1633     return false;
1634   }
1635 
1636   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1637                             AllocationSpace dest) {
1638     if (profiling_) {
1639       MigrateObject<kProfiled>(dst, src, size, dest);
1640     } else {
1641       MigrateObject<kFast>(dst, src, size, dest);
1642     }
1643   }
1644 
1645   template <MigrationMode mode>
1646   inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1647                             AllocationSpace dest) {
1648     Address dst_addr = dst->address();
1649     Address src_addr = src->address();
1650     DCHECK(heap_->AllowedToBeMigrated(src, dest));
1651     DCHECK(dest != LO_SPACE);
1652     if (dest == OLD_SPACE) {
1653       DCHECK_OBJECT_SIZE(size);
1654       DCHECK(IsAligned(size, kPointerSize));
1655       heap_->CopyBlock(dst_addr, src_addr, size);
1656       if ((mode == kProfiled) && dst->IsBytecodeArray()) {
1657         PROFILE(heap_->isolate(),
1658                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1659       }
1660       RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1661       dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1662     } else if (dest == CODE_SPACE) {
1663       DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1664       if (mode == kProfiled) {
1665         PROFILE(heap_->isolate(),
1666                 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1667       }
1668       heap_->CopyBlock(dst_addr, src_addr, size);
1669       Code::cast(dst)->Relocate(dst_addr - src_addr);
1670       RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1671       dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1672     } else {
1673       DCHECK_OBJECT_SIZE(size);
1674       DCHECK(dest == NEW_SPACE);
1675       heap_->CopyBlock(dst_addr, src_addr, size);
1676     }
1677     if (mode == kProfiled) {
1678       heap_->OnMoveEvent(dst, src, size);
1679     }
1680     Memory::Address_at(src_addr) = dst_addr;
1681   }
1682 
1683 #ifdef VERIFY_HEAP
1684   bool AbortCompactionForTesting(HeapObject* object) {
1685     if (FLAG_stress_compaction) {
1686       const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1687                              Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1688       if ((reinterpret_cast<uintptr_t>(object->address()) &
1689            Page::kPageAlignmentMask) == mask) {
1690         Page* page = Page::FromAddress(object->address());
1691         if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1692           page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1693         } else {
1694           page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1695           return true;
1696         }
1697       }
1698     }
1699     return false;
1700   }
1701 #endif  // VERIFY_HEAP
1702 
1703   Heap* heap_;
1704   CompactionSpaceCollection* compaction_spaces_;
1705   bool profiling_;
1706 };
1707 
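// Evacuates live new-space objects: objects selected by ShouldBePromoted() are
// copied into the old-space compaction space, the rest are copied within new
// space, using a local allocation buffer (LAB) of kLabSize for objects up to
// kMaxLabObjectSize and falling back to old space when new space is exhausted.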
1708 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1709     : public MarkCompactCollector::EvacuateVisitorBase {
1710  public:
1711   static const intptr_t kLabSize = 4 * KB;
1712   static const intptr_t kMaxLabObjectSize = 256;
1713 
1714   explicit EvacuateNewSpaceVisitor(Heap* heap,
1715                                    CompactionSpaceCollection* compaction_spaces,
1716                                    base::HashMap* local_pretenuring_feedback)
1717       : EvacuateVisitorBase(heap, compaction_spaces),
1718         buffer_(LocalAllocationBuffer::InvalidBuffer()),
1719         space_to_allocate_(NEW_SPACE),
1720         promoted_size_(0),
1721         semispace_copied_size_(0),
1722         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1723 
1724   inline bool Visit(HeapObject* object) override {
1725     heap_->UpdateAllocationSite<Heap::kCached>(object,
1726                                                local_pretenuring_feedback_);
1727     int size = object->Size();
1728     HeapObject* target_object = nullptr;
1729     if (heap_->ShouldBePromoted(object->address(), size) &&
1730         TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1731                           &target_object)) {
1732       promoted_size_ += size;
1733       return true;
1734     }
1735     HeapObject* target = nullptr;
1736     AllocationSpace space = AllocateTargetObject(object, &target);
1737     MigrateObject(HeapObject::cast(target), object, size, space);
1738     semispace_copied_size_ += size;
1739     return true;
1740   }
1741 
1742   intptr_t promoted_size() { return promoted_size_; }
1743   intptr_t semispace_copied_size() { return semispace_copied_size_; }
1744 
1745  private:
1746   enum NewSpaceAllocationMode {
1747     kNonstickyBailoutOldSpace,
1748     kStickyBailoutOldSpace,
1749   };
1750 
1751   inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1752                                               HeapObject** target_object) {
1753     const int size = old_object->Size();
1754     AllocationAlignment alignment = old_object->RequiredAlignment();
1755     AllocationResult allocation;
1756     AllocationSpace space_allocated_in = space_to_allocate_;
1757     if (space_to_allocate_ == NEW_SPACE) {
1758       if (size > kMaxLabObjectSize) {
1759         allocation =
1760             AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1761       } else {
1762         allocation = AllocateInLab(size, alignment);
1763       }
1764     }
1765     if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1766       allocation = AllocateInOldSpace(size, alignment);
1767       space_allocated_in = OLD_SPACE;
1768     }
1769     bool ok = allocation.To(target_object);
1770     DCHECK(ok);
1771     USE(ok);
1772     return space_allocated_in;
1773   }
1774 
1775   inline bool NewLocalAllocationBuffer() {
1776     AllocationResult result =
1777         AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1778     LocalAllocationBuffer saved_old_buffer = buffer_;
1779     buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1780     if (buffer_.IsValid()) {
1781       buffer_.TryMerge(&saved_old_buffer);
1782       return true;
1783     }
1784     return false;
1785   }
1786 
1787   inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1788                                              AllocationAlignment alignment,
1789                                              NewSpaceAllocationMode mode) {
1790     AllocationResult allocation =
1791         heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1792     if (allocation.IsRetry()) {
1793       if (!heap_->new_space()->AddFreshPageSynchronized()) {
1794         if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1795       } else {
1796         allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1797                                                                  alignment);
1798         if (allocation.IsRetry()) {
1799           if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1800         }
1801       }
1802     }
1803     return allocation;
1804   }
1805 
1806   inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1807                                              AllocationAlignment alignment) {
1808     AllocationResult allocation =
1809         compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1810                                                         alignment);
1811     if (allocation.IsRetry()) {
1812       v8::internal::Heap::FatalProcessOutOfMemory(
1813           "MarkCompactCollector: semi-space copy, fallback in old gen", true);
1814     }
1815     return allocation;
1816   }
1817 
1818   inline AllocationResult AllocateInLab(int size_in_bytes,
1819                                         AllocationAlignment alignment) {
1820     AllocationResult allocation;
1821     if (!buffer_.IsValid()) {
1822       if (!NewLocalAllocationBuffer()) {
1823         space_to_allocate_ = OLD_SPACE;
1824         return AllocationResult::Retry(OLD_SPACE);
1825       }
1826     }
1827     allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1828     if (allocation.IsRetry()) {
1829       if (!NewLocalAllocationBuffer()) {
1830         space_to_allocate_ = OLD_SPACE;
1831         return AllocationResult::Retry(OLD_SPACE);
1832       } else {
1833         allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1834         if (allocation.IsRetry()) {
1835           space_to_allocate_ = OLD_SPACE;
1836           return AllocationResult::Retry(OLD_SPACE);
1837         }
1838       }
1839     }
1840     return allocation;
1841   }
1842 
1843   LocalAllocationBuffer buffer_;
1844   AllocationSpace space_to_allocate_;
1845   intptr_t promoted_size_;
1846   intptr_t semispace_copied_size_;
1847   base::HashMap* local_pretenuring_feedback_;
1848 };
1849 
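// Evacuates an entire new-space page without copying its objects: NEW_TO_OLD
// converts the page into an old-space page, NEW_TO_NEW moves it from
// from-space to to-space. Visit() only updates pretenuring feedback and, for
// NEW_TO_OLD, re-records the migrated slots.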
1850 template <PageEvacuationMode mode>
1851 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1852     : public MarkCompactCollector::HeapObjectVisitor {
1853  public:
1854   explicit EvacuateNewSpacePageVisitor(
1855       Heap* heap, base::HashMap* local_pretenuring_feedback)
1856       : heap_(heap),
1857         moved_bytes_(0),
1858         local_pretenuring_feedback_(local_pretenuring_feedback) {}
1859 
1860   static void Move(Page* page) {
1861     switch (mode) {
1862       case NEW_TO_NEW:
1863         page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1864         page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1865         break;
1866       case NEW_TO_OLD: {
1867         page->Unlink();
1868         Page* new_page = Page::ConvertNewToOld(page);
1869         new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1870         break;
1871       }
1872     }
1873   }
1874 
1875   inline bool Visit(HeapObject* object) {
1876     heap_->UpdateAllocationSite<Heap::kCached>(object,
1877                                                local_pretenuring_feedback_);
1878     if (mode == NEW_TO_OLD) {
1879       RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1880       object->IterateBodyFast(&visitor);
1881     }
1882     return true;
1883   }
1884 
1885   intptr_t moved_bytes() { return moved_bytes_; }
1886   void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1887 
1888  private:
1889   Heap* heap_;
1890   intptr_t moved_bytes_;
1891   base::HashMap* local_pretenuring_feedback_;
1892 };
1893 
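// Evacuates objects from old-generation evacuation candidates into the
// compaction space that matches the owning space of their page; a false
// return value tells the caller that the object could not be moved.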
1894 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1895     : public MarkCompactCollector::EvacuateVisitorBase {
1896  public:
1897   EvacuateOldSpaceVisitor(Heap* heap,
1898                           CompactionSpaceCollection* compaction_spaces)
1899       : EvacuateVisitorBase(heap, compaction_spaces) {}
1900 
1901   inline bool Visit(HeapObject* object) override {
1902     CompactionSpace* target_space = compaction_spaces_->Get(
1903         Page::FromAddress(object->address())->owner()->identity());
1904     HeapObject* target_object = nullptr;
1905     if (TryEvacuateObject(target_space, object, &target_object)) {
1906       DCHECK(object->map_word().IsForwardingAddress());
1907       return true;
1908     }
1909     return false;
1910   }
1911 };
1912 
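// Does not move objects at all; it only runs RecordMigratedSlotVisitor over
// each object body to rebuild remembered-set entries, e.g. for pages whose
// evacuation was aborted.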
1913 class MarkCompactCollector::EvacuateRecordOnlyVisitor final
1914     : public MarkCompactCollector::HeapObjectVisitor {
1915  public:
1916   explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1917 
1918   inline bool Visit(HeapObject* object) {
1919     RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1920     object->IterateBody(&visitor);
1921     return true;
1922   }
1923 
1924  private:
1925   Heap* heap_;
1926 };
1927 
1928 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1929   for (Page* p : *space) {
1930     DiscoverGreyObjectsOnPage(p);
1931     if (marking_deque()->IsFull()) return;
1932   }
1933 }
1934 
1935 
1936 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1937   NewSpace* space = heap()->new_space();
1938   for (Page* page : PageRange(space->bottom(), space->top())) {
1939     DiscoverGreyObjectsOnPage(page);
1940     if (marking_deque()->IsFull()) return;
1941   }
1942 }
1943 
1944 
1945 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1946   Object* o = *p;
1947   if (!o->IsHeapObject()) return false;
1948   return ObjectMarking::IsWhite(HeapObject::cast(o));
1949 }
1950 
1951 
1952 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1953                                                         Object** p) {
1954   Object* o = *p;
1955   DCHECK(o->IsHeapObject());
1956   return ObjectMarking::IsWhite(HeapObject::cast(o));
1957 }
1958 
1959 void MarkCompactCollector::MarkStringTable(
1960     RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
1961   StringTable* string_table = heap()->string_table();
1962   // Mark the string table itself.
1963   if (ObjectMarking::IsWhite(string_table)) {
1964     // String table could have already been marked by visiting the handles list.
1965     ObjectMarking::WhiteToBlack(string_table);
1966   }
1967   // Explicitly mark the prefix.
1968   string_table->IteratePrefix(visitor);
1969   ProcessMarkingDeque<MarkCompactMode::FULL>();
1970 }
1971 
1972 void MarkCompactCollector::MarkRoots(
1973     RootMarkingVisitor<MarkCompactMode::FULL>* visitor) {
1974   // Mark the heap roots including global variables, stack variables,
1975   // etc., and all objects reachable from them.
1976   heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
1977 
1978   // Handle the string table specially.
1979   MarkStringTable(visitor);
1980 
1981   // There may be overflowed objects in the heap.  Visit them now.
1982   while (marking_deque()->overflowed()) {
1983     RefillMarkingDeque<MarkCompactMode::FULL>();
1984     EmptyMarkingDeque<MarkCompactMode::FULL>();
1985   }
1986 }
1987 
1988 
1989 void MarkCompactCollector::MarkImplicitRefGroups(
1990     MarkObjectFunction mark_object) {
1991   List<ImplicitRefGroup*>* ref_groups =
1992       isolate()->global_handles()->implicit_ref_groups();
1993 
1994   int last = 0;
1995   for (int i = 0; i < ref_groups->length(); i++) {
1996     ImplicitRefGroup* entry = ref_groups->at(i);
1997     DCHECK(entry != NULL);
1998 
1999     if (ObjectMarking::IsWhite(*entry->parent)) {
2000       (*ref_groups)[last++] = entry;
2001       continue;
2002     }
2003 
2004     Object*** children = entry->children;
2005     // A parent object is marked, so mark all child heap objects.
2006     for (size_t j = 0; j < entry->length; ++j) {
2007       if ((*children[j])->IsHeapObject()) {
2008         mark_object(heap(), HeapObject::cast(*children[j]));
2009       }
2010     }
2011 
2012     // Once the entire group has been marked, dispose of it because it's
2013     // not needed anymore.
2014     delete entry;
2015   }
2016   ref_groups->Rewind(last);
2017 }
2018 
2019 
2020 // Mark all objects reachable from the objects on the marking stack.
2021 // Before: the marking stack contains zero or more heap object pointers.
2022 // After: the marking stack is empty, and all objects reachable from the
2023 // marking stack have been marked, or are overflowed in the heap.
2024 template <MarkCompactMode mode>
2025 void MarkCompactCollector::EmptyMarkingDeque() {
2026   while (!marking_deque()->IsEmpty()) {
2027     HeapObject* object = marking_deque()->Pop();
2028 
2029     DCHECK(!object->IsFiller());
2030     DCHECK(object->IsHeapObject());
2031     DCHECK(heap()->Contains(object));
2032     DCHECK(!ObjectMarking::IsWhite(object));
2033 
2034     Map* map = object->map();
2035     switch (mode) {
2036       case MarkCompactMode::FULL: {
2037         MarkObject(map);
2038         MarkCompactMarkingVisitor::IterateBody(map, object);
2039       } break;
2040       case MarkCompactMode::YOUNG_GENERATION: {
2041         DCHECK(ObjectMarking::IsBlack(object));
2042         StaticYoungGenerationMarkingVisitor::IterateBody(map, object);
2043       } break;
2044     }
2045   }
2046 }
2047 
2048 
2049 // Sweep the heap for overflowed objects, clear their overflow bits, and
2050 // push them on the marking stack.  Stop early if the marking stack fills
2051 // before sweeping completes.  If sweeping completes, there are no remaining
2052 // overflowed objects in the heap so the overflow flag on the marking stack
2053 // is cleared.
2054 template <MarkCompactMode mode>
2055 void MarkCompactCollector::RefillMarkingDeque() {
2056   isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
2057   DCHECK(marking_deque()->overflowed());
2058 
2059   DiscoverGreyObjectsInNewSpace();
2060   if (marking_deque()->IsFull()) return;
2061 
2062   if (mode == MarkCompactMode::FULL) {
2063     DiscoverGreyObjectsInSpace(heap()->old_space());
2064     if (marking_deque()->IsFull()) return;
2065     DiscoverGreyObjectsInSpace(heap()->code_space());
2066     if (marking_deque()->IsFull()) return;
2067     DiscoverGreyObjectsInSpace(heap()->map_space());
2068     if (marking_deque()->IsFull()) return;
2069     LargeObjectIterator lo_it(heap()->lo_space());
2070     DiscoverGreyObjectsWithIterator(&lo_it);
2071     if (marking_deque()->IsFull()) return;
2072   }
2073 
2074   marking_deque()->ClearOverflowed();
2075 }
2076 
2077 
2078 // Mark all objects reachable (transitively) from objects on the marking
2079 // stack.  Before: the marking stack contains zero or more heap object
2080 // pointers.  After: the marking stack is empty and there are no overflowed
2081 // objects in the heap.
2082 template <MarkCompactMode mode>
2083 void MarkCompactCollector::ProcessMarkingDeque() {
2084   EmptyMarkingDeque<mode>();
2085   while (marking_deque()->overflowed()) {
2086     RefillMarkingDeque<mode>();
2087     EmptyMarkingDeque<mode>();
2088   }
2089   DCHECK(marking_deque()->IsEmpty());
2090 }
2091 
2092 // Mark all objects reachable (transitively) from objects on the marking
2093 // stack including references only considered in the atomic marking pause.
2094 void MarkCompactCollector::ProcessEphemeralMarking(
2095     ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
2096   DCHECK(marking_deque()->IsEmpty() && !marking_deque()->overflowed());
2097   bool work_to_do = true;
2098   while (work_to_do) {
2099     if (!only_process_harmony_weak_collections) {
2100       if (heap_->local_embedder_heap_tracer()->InUse()) {
2101         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_TRACING);
2102         heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
2103         heap_->local_embedder_heap_tracer()->Trace(
2104             0,
2105             EmbedderHeapTracer::AdvanceTracingActions(
2106                 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
2107       } else {
2108         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_OBJECT_GROUPING);
2109         isolate()->global_handles()->IterateObjectGroups(
2110             visitor, &IsUnmarkedHeapObjectWithHeap);
2111         MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
2112       }
2113     } else {
2114       // TODO(mlippautz): We currently do not trace through blink when
2115       // discovering new objects reachable from weak roots (that have been made
2116       // strong). This is a limitation of not having a separate handle type
2117       // that doesn't require zapping before this phase. See crbug.com/668060.
2118       heap_->local_embedder_heap_tracer()->ClearCachedWrappersToTrace();
2119     }
2120     ProcessWeakCollections();
2121     work_to_do = !marking_deque()->IsEmpty();
2122     ProcessMarkingDeque<MarkCompactMode::FULL>();
2123   }
2124   CHECK(marking_deque()->IsEmpty());
2125   CHECK_EQ(0, heap()->local_embedder_heap_tracer()->NumberOfWrappersToTrace());
2126 }
2127 
2128 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2129   for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2130        !it.done(); it.Advance()) {
2131     if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2132       return;
2133     }
2134     if (it.frame()->type() == StackFrame::OPTIMIZED) {
2135       Code* code = it.frame()->LookupCode();
2136       if (!code->CanDeoptAt(it.frame()->pc())) {
2137         Code::BodyDescriptor::IterateBody(code, visitor);
2138       }
2139       ProcessMarkingDeque<MarkCompactMode::FULL>();
2140       return;
2141     }
2142   }
2143 }
2144 
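// The marking deque is backed by a kMaxSize virtual memory reservation that is
// committed lazily: StartUsing() commits the backing store and sets up a
// power-of-two ring buffer (limited to 64 entries under
// FLAG_force_marking_deque_overflows), while StopUsing() uncommits it again,
// via a background UncommitTask when concurrent sweeping is enabled.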
2145 void MarkingDeque::SetUp() {
2146   backing_store_ = new base::VirtualMemory(kMaxSize);
2147   backing_store_committed_size_ = 0;
2148   if (backing_store_ == nullptr) {
2149     V8::FatalProcessOutOfMemory("MarkingDeque::SetUp");
2150   }
2151 }
2152 
2153 void MarkingDeque::TearDown() {
2154   delete backing_store_;
2155 }
2156 
2157 void MarkingDeque::StartUsing() {
2158   base::LockGuard<base::Mutex> guard(&mutex_);
2159   if (in_use_) {
2160     // This can happen in mark-compact GC if the incremental marker already
2161     // started using the marking deque.
2162     return;
2163   }
2164   in_use_ = true;
2165   EnsureCommitted();
2166   array_ = reinterpret_cast<HeapObject**>(backing_store_->address());
2167   size_t size = FLAG_force_marking_deque_overflows
2168                     ? 64 * kPointerSize
2169                     : backing_store_committed_size_;
2170   DCHECK(
2171       base::bits::IsPowerOfTwo32(static_cast<uint32_t>(size / kPointerSize)));
2172   mask_ = static_cast<int>((size / kPointerSize) - 1);
2173   top_ = bottom_ = 0;
2174   overflowed_ = false;
2175 }
2176 
2177 void MarkingDeque::StopUsing() {
2178   base::LockGuard<base::Mutex> guard(&mutex_);
2179   if (!in_use_) return;
2180   DCHECK(IsEmpty());
2181   DCHECK(!overflowed_);
2182   top_ = bottom_ = mask_ = 0;
2183   in_use_ = false;
2184   if (FLAG_concurrent_sweeping) {
2185     StartUncommitTask();
2186   } else {
2187     Uncommit();
2188   }
2189 }
2190 
2191 void MarkingDeque::Clear() {
2192   DCHECK(in_use_);
2193   top_ = bottom_ = 0;
2194   overflowed_ = false;
2195 }
2196 
2197 void MarkingDeque::Uncommit() {
2198   DCHECK(!in_use_);
2199   bool success = backing_store_->Uncommit(backing_store_->address(),
2200                                           backing_store_committed_size_);
2201   backing_store_committed_size_ = 0;
2202   CHECK(success);
2203 }
2204 
2205 void MarkingDeque::EnsureCommitted() {
2206   DCHECK(in_use_);
2207   if (backing_store_committed_size_ > 0) return;
2208 
2209   for (size_t size = kMaxSize; size >= kMinSize; size /= 2) {
2210     if (backing_store_->Commit(backing_store_->address(), size, false)) {
2211       backing_store_committed_size_ = size;
2212       break;
2213     }
2214   }
2215   if (backing_store_committed_size_ == 0) {
2216     V8::FatalProcessOutOfMemory("MarkingDeque::EnsureCommitted");
2217   }
2218 }
2219 
2220 void MarkingDeque::StartUncommitTask() {
2221   if (!uncommit_task_pending_) {
2222     uncommit_task_pending_ = true;
2223     UncommitTask* task = new UncommitTask(heap_->isolate(), this);
2224     V8::GetCurrentPlatform()->CallOnBackgroundThread(
2225         task, v8::Platform::kShortRunningTask);
2226   }
2227 }
2228 
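// Splits object statistics into live (black) and dead (white) objects; grey
// objects must not occur anymore when this visitor runs. Global objects are
// counted as live up front via CollectGlobalStatistics().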
2229 class MarkCompactCollector::ObjectStatsVisitor
2230     : public MarkCompactCollector::HeapObjectVisitor {
2231  public:
2232   ObjectStatsVisitor(Heap* heap, ObjectStats* live_stats,
2233                      ObjectStats* dead_stats)
2234       : live_collector_(heap, live_stats), dead_collector_(heap, dead_stats) {
2235     DCHECK_NOT_NULL(live_stats);
2236     DCHECK_NOT_NULL(dead_stats);
2237     // Global objects are roots and thus recorded as live.
2238     live_collector_.CollectGlobalStatistics();
2239   }
2240 
2241   bool Visit(HeapObject* obj) override {
2242     if (ObjectMarking::IsBlack(obj)) {
2243       live_collector_.CollectStatistics(obj);
2244     } else {
2245       DCHECK(!ObjectMarking::IsGrey(obj));
2246       dead_collector_.CollectStatistics(obj);
2247     }
2248     return true;
2249   }
2250 
2251  private:
2252   ObjectStatsCollector live_collector_;
2253   ObjectStatsCollector dead_collector_;
2254 };
2255 
2256 void MarkCompactCollector::VisitAllObjects(HeapObjectVisitor* visitor) {
2257   SpaceIterator space_it(heap());
2258   HeapObject* obj = nullptr;
2259   while (space_it.has_next()) {
2260     std::unique_ptr<ObjectIterator> it(space_it.next()->GetObjectIterator());
2261     ObjectIterator* obj_it = it.get();
2262     while ((obj = obj_it->Next()) != nullptr) {
2263       visitor->Visit(obj);
2264     }
2265   }
2266 }
2267 
2268 void MarkCompactCollector::RecordObjectStats() {
2269   if (V8_UNLIKELY(FLAG_gc_stats)) {
2270     heap()->CreateObjectStats();
2271     ObjectStatsVisitor visitor(heap(), heap()->live_object_stats_,
2272                                heap()->dead_object_stats_);
2273     VisitAllObjects(&visitor);
2274     if (V8_UNLIKELY(FLAG_gc_stats &
2275                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
2276       std::stringstream live, dead;
2277       heap()->live_object_stats_->Dump(live);
2278       heap()->dead_object_stats_->Dump(dead);
2279       TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
2280                            "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
2281                            "live", TRACE_STR_COPY(live.str().c_str()), "dead",
2282                            TRACE_STR_COPY(dead.str().c_str()));
2283     }
2284     if (FLAG_trace_gc_object_stats) {
2285       heap()->live_object_stats_->PrintJSON("live");
2286       heap()->dead_object_stats_->PrintJSON("dead");
2287     }
2288     heap()->live_object_stats_->CheckpointObjectStats();
2289     heap()->dead_object_stats_->ClearObjectStats();
2290   }
2291 }
2292 
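// Remembered-set callback for young-generation marking: slots pointing into
// new space mark their target black (if it was not already marked) and are
// kept, while slots pointing into the old generation are removed.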
2293 SlotCallbackResult MarkCompactCollector::CheckAndMarkObject(
2294     Heap* heap, Address slot_address) {
2295   Object* object = *reinterpret_cast<Object**>(slot_address);
2296   if (heap->InNewSpace(object)) {
2297     // Marking happens before flipping the young generation, so the object
2298     // has to be in ToSpace.
2299     DCHECK(heap->InToSpace(object));
2300     HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
2301     if (ObjectMarking::IsBlackOrGrey(heap_object)) {
2302       return KEEP_SLOT;
2303     }
2304     ObjectMarking::WhiteToBlack(heap_object);
2305     StaticYoungGenerationMarkingVisitor::IterateBody(heap_object->map(),
2306                                                      heap_object);
2307     return KEEP_SLOT;
2308   }
2309   return REMOVE_SLOT;
2310 }
2311 
2312 static bool IsUnmarkedObject(Heap* heap, Object** p) {
2313   DCHECK_IMPLIES(heap->InNewSpace(*p), heap->InToSpace(*p));
2314   return heap->InNewSpace(*p) && !ObjectMarking::IsBlack(HeapObject::cast(*p));
2315 }
2316 
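// Marking phase of the minor (young-generation) mark-compact: marks new-space
// objects reachable from the strong roots, the OLD_TO_NEW remembered set,
// encountered weak collections, code-flushing candidates and unmodified weak
// global handles.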
2317 void MarkCompactCollector::MarkLiveObjectsInYoungGeneration() {
2318   TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
2319 
2320   PostponeInterruptsScope postpone(isolate());
2321 
2322   StaticYoungGenerationMarkingVisitor::Initialize(heap());
2323   RootMarkingVisitor<MarkCompactMode::YOUNG_GENERATION> root_visitor(heap());
2324 
2325   marking_deque()->StartUsing();
2326 
2327   isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
2328       &Heap::IsUnmodifiedHeapObject);
2329 
2330   {
2331     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
2332     heap()->IterateRoots(&root_visitor, VISIT_ALL_IN_SCAVENGE);
2333     ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
2334   }
2335 
2336   {
2337     TRACE_GC(heap()->tracer(),
2338              GCTracer::Scope::MINOR_MC_MARK_OLD_TO_NEW_POINTERS);
2339     RememberedSet<OLD_TO_NEW>::Iterate(heap(), [this](Address addr) {
2340       return CheckAndMarkObject(heap(), addr);
2341     });
2342     RememberedSet<OLD_TO_NEW>::IterateTyped(
2343         heap(), [this](SlotType type, Address host_addr, Address addr) {
2344           return UpdateTypedSlotHelper::UpdateTypedSlot(
2345               isolate(), type, addr, [this](Object** addr) {
2346                 return CheckAndMarkObject(heap(),
2347                                           reinterpret_cast<Address>(addr));
2348               });
2349         });
2350     ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
2351   }
2352 
2353   {
2354     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
2355     heap()->VisitEncounteredWeakCollections(&root_visitor);
2356     ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
2357   }
2358 
2359   if (is_code_flushing_enabled()) {
2360     TRACE_GC(heap()->tracer(),
2361              GCTracer::Scope::MINOR_MC_MARK_CODE_FLUSH_CANDIDATES);
2362     code_flusher()->IteratePointersToFromSpace(&root_visitor);
2363     ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
2364   }
2365 
2366   {
2367     TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
2368     isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
2369         &IsUnmarkedObject);
2370     isolate()
2371         ->global_handles()
2372         ->IterateNewSpaceWeakUnmodifiedRoots<GlobalHandles::VISIT_OTHERS>(
2373             &root_visitor);
2374     ProcessMarkingDeque<MarkCompactMode::YOUNG_GENERATION>();
2375   }
2376 
2377   marking_deque()->StopUsing();
2378 }
2379 
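// Marking phase of the full mark-compact: finalizes incremental marking if it
// was running, prepares code flushing, marks the strong roots and the string
// table, scans the topmost optimized frame, and then processes the
// ephemeral/weak closure (embedder tracing or object groups, weak global
// handles) until a fixed point is reached.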
2380 void MarkCompactCollector::MarkLiveObjects() {
2381   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2382   // The recursive GC marker detects when it is nearing stack overflow,
2383   // and switches to a different marking system.  JS interrupts interfere
2384   // with the C stack limit check.
2385   PostponeInterruptsScope postpone(isolate());
2386 
2387   {
2388     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2389     IncrementalMarking* incremental_marking = heap_->incremental_marking();
2390     if (was_marked_incrementally_) {
2391       incremental_marking->Finalize();
2392     } else {
2393       CHECK(incremental_marking->IsStopped());
2394     }
2395   }
2396 
2397 #ifdef DEBUG
2398   DCHECK(state_ == PREPARE_GC);
2399   state_ = MARK_LIVE_OBJECTS;
2400 #endif
2401 
2402   marking_deque()->StartUsing();
2403 
2404   heap_->local_embedder_heap_tracer()->EnterFinalPause();
2405 
2406   {
2407     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2408     PrepareForCodeFlushing();
2409   }
2410 
2411   RootMarkingVisitor<MarkCompactMode::FULL> root_visitor(heap());
2412 
2413   {
2414     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2415     MarkRoots(&root_visitor);
2416     ProcessTopOptimizedFrame(&root_visitor);
2417   }
2418 
2419   {
2420     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2421 
2422     // The objects reachable from the roots are marked, yet unreachable
2423     // objects are unmarked.  Mark objects reachable due to host
2424     // application specific logic or through Harmony weak maps.
2425     {
2426       TRACE_GC(heap()->tracer(),
2427                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2428       ProcessEphemeralMarking(&root_visitor, false);
2429     }
2430 
2431     // The objects reachable from the roots, weak maps or object groups
2432     // are marked. Objects pointed to only by weak global handles cannot be
2433     // immediately reclaimed. Instead, we have to mark them as pending and mark
2434     // objects reachable from them.
2435     //
2436     // First we identify nonlive weak handles and mark them as pending
2437     // destruction.
2438     {
2439       TRACE_GC(heap()->tracer(),
2440                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2441       heap()->isolate()->global_handles()->IdentifyWeakHandles(
2442           &IsUnmarkedHeapObject);
2443       ProcessMarkingDeque<MarkCompactMode::FULL>();
2444     }
2445     // Then we mark the objects.
2446 
2447     {
2448       TRACE_GC(heap()->tracer(),
2449                GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2450       heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2451       ProcessMarkingDeque<MarkCompactMode::FULL>();
2452     }
2453 
2454     // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2455     // the weak roots we just marked as pending destruction.
2456     //
2457     // We only process harmony collections, as all object groups have been fully
2458     // processed and no weakly reachable node can discover new object groups.
2459     {
2460       TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2461       ProcessEphemeralMarking(&root_visitor, true);
2462       {
2463         TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WRAPPER_EPILOGUE);
2464         heap()->local_embedder_heap_tracer()->TraceEpilogue();
2465       }
2466     }
2467   }
2468 }
2469 
2470 
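// Clearing phase: drops references to objects that did not get marked. It
// prunes the string tables, runs the weak-object retainers, removes object
// groups, flushes collected code candidates, clears dead weak cells and map
// transitions, and marks dependent code for deoptimization.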
2471 void MarkCompactCollector::ClearNonLiveReferences() {
2472   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2473 
2474   {
2475     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2476 
2477     // Prune the string table removing all strings only pointed to by the
2478     // string table.  Cannot use string_table() here because the string
2479     // table is marked.
2480     StringTable* string_table = heap()->string_table();
2481     InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
2482     string_table->IterateElements(&internalized_visitor);
2483     string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2484 
2485     ExternalStringTableCleaner external_visitor(heap(), nullptr);
2486     heap()->external_string_table_.IterateAll(&external_visitor);
2487     heap()->external_string_table_.CleanUpAll();
2488   }
2489 
2490   {
2491     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2492     // Process the weak references.
2493     MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2494     heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2495   }
2496 
2497   {
2498     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2499 
2500     // Remove object groups after marking phase.
2501     heap()->isolate()->global_handles()->RemoveObjectGroups();
2502     heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2503   }
2504 
2505   // Flush code from collected candidates.
2506   if (is_code_flushing_enabled()) {
2507     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2508     code_flusher_->ProcessCandidates();
2509   }
2510 
2511 
2512   DependentCode* dependent_code_list;
2513   Object* non_live_map_list;
2514   ClearWeakCells(&non_live_map_list, &dependent_code_list);
2515 
2516   {
2517     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2518     ClearSimpleMapTransitions(non_live_map_list);
2519     ClearFullMapTransitions();
2520   }
2521 
2522   MarkDependentCodeForDeoptimization(dependent_code_list);
2523 
2524   ClearWeakCollections();
2525 }
2526 
2527 
2528 void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2529     DependentCode* list_head) {
2530   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2531   Isolate* isolate = this->isolate();
2532   DependentCode* current = list_head;
2533   while (current->length() > 0) {
2534     have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2535         isolate, DependentCode::kWeakCodeGroup);
2536     current = current->next_link();
2537   }
2538 
2539   {
2540     ArrayList* list = heap_->weak_new_space_object_to_code_list();
2541     int counter = 0;
2542     for (int i = 0; i < list->Length(); i += 2) {
2543       WeakCell* obj = WeakCell::cast(list->Get(i));
2544       WeakCell* dep = WeakCell::cast(list->Get(i + 1));
2545       if (obj->cleared() || dep->cleared()) {
2546         if (!dep->cleared()) {
2547           Code* code = Code::cast(dep->value());
2548           if (!code->marked_for_deoptimization()) {
2549             DependentCode::SetMarkedForDeoptimization(
2550                 code, DependentCode::DependencyGroup::kWeakCodeGroup);
2551             code->InvalidateEmbeddedObjects();
2552             have_code_to_deoptimize_ = true;
2553           }
2554         }
2555       } else {
2556         // We record the slot manually because marking is finished at this
2557         // point and the write barrier would bailout.
2558         list->Set(counter, obj, SKIP_WRITE_BARRIER);
2559         RecordSlot(list, list->Slot(counter), obj);
2560         counter++;
2561         list->Set(counter, dep, SKIP_WRITE_BARRIER);
2562         RecordSlot(list, list->Slot(counter), dep);
2563         counter++;
2564       }
2565     }
2566   }
2567 
2568   WeakHashTable* table = heap_->weak_object_to_code_table();
2569   uint32_t capacity = table->Capacity();
2570   for (uint32_t i = 0; i < capacity; i++) {
2571     uint32_t key_index = table->EntryToIndex(i);
2572     Object* key = table->get(key_index);
2573     if (!table->IsKey(isolate, key)) continue;
2574     uint32_t value_index = table->EntryToValueIndex(i);
2575     Object* value = table->get(value_index);
2576     DCHECK(key->IsWeakCell());
2577     if (WeakCell::cast(key)->cleared()) {
2578       have_code_to_deoptimize_ |=
2579           DependentCode::cast(value)->MarkCodeForDeoptimization(
2580               isolate, DependentCode::kWeakCodeGroup);
2581       table->set(key_index, heap_->the_hole_value());
2582       table->set(value_index, heap_->the_hole_value());
2583       table->ElementRemoved();
2584     }
2585   }
2586 }
2587 
2588 
2589 void MarkCompactCollector::ClearSimpleMapTransitions(
2590     Object* non_live_map_list) {
2591   Object* the_hole_value = heap()->the_hole_value();
2592   Object* weak_cell_obj = non_live_map_list;
2593   while (weak_cell_obj != Smi::kZero) {
2594     WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2595     Map* map = Map::cast(weak_cell->value());
2596     DCHECK(ObjectMarking::IsWhite(map));
2597     Object* potential_parent = map->constructor_or_backpointer();
2598     if (potential_parent->IsMap()) {
2599       Map* parent = Map::cast(potential_parent);
2600       if (ObjectMarking::IsBlackOrGrey(parent) &&
2601           parent->raw_transitions() == weak_cell) {
2602         ClearSimpleMapTransition(parent, map);
2603       }
2604     }
2605     weak_cell->clear();
2606     weak_cell_obj = weak_cell->next();
2607     weak_cell->clear_next(the_hole_value);
2608   }
2609 }
2610 
2611 
2612 void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2613                                                     Map* dead_transition) {
2614   // A previously existing simple transition (stored in a WeakCell) is going
2615   // to be cleared. Clear the useless cell pointer, and take ownership
2616   // of the descriptor array.
2617   map->set_raw_transitions(Smi::kZero);
2618   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2619   DescriptorArray* descriptors = map->instance_descriptors();
2620   if (descriptors == dead_transition->instance_descriptors() &&
2621       number_of_own_descriptors > 0) {
2622     TrimDescriptorArray(map, descriptors);
2623     DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2624     map->set_owns_descriptors(true);
2625   }
2626 }
2627 
2628 
2629 void MarkCompactCollector::ClearFullMapTransitions() {
2630   HeapObject* undefined = heap()->undefined_value();
2631   Object* obj = heap()->encountered_transition_arrays();
2632   while (obj != Smi::kZero) {
2633     TransitionArray* array = TransitionArray::cast(obj);
2634     int num_transitions = array->number_of_entries();
2635     DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2636     if (num_transitions > 0) {
2637       Map* map = array->GetTarget(0);
2638       Map* parent = Map::cast(map->constructor_or_backpointer());
2639       bool parent_is_alive = ObjectMarking::IsBlackOrGrey(parent);
2640       DescriptorArray* descriptors =
2641           parent_is_alive ? parent->instance_descriptors() : nullptr;
2642       bool descriptors_owner_died =
2643           CompactTransitionArray(parent, array, descriptors);
2644       if (descriptors_owner_died) {
2645         TrimDescriptorArray(parent, descriptors);
2646       }
2647     }
2648     obj = array->next_link();
2649     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2650   }
2651   heap()->set_encountered_transition_arrays(Smi::kZero);
2652 }
2653 
2654 
2655 bool MarkCompactCollector::CompactTransitionArray(
2656     Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2657   int num_transitions = transitions->number_of_entries();
2658   bool descriptors_owner_died = false;
2659   int transition_index = 0;
2660   // Compact all live transitions to the left.
2661   for (int i = 0; i < num_transitions; ++i) {
2662     Map* target = transitions->GetTarget(i);
2663     DCHECK_EQ(target->constructor_or_backpointer(), map);
2664     if (ObjectMarking::IsWhite(target)) {
2665       if (descriptors != nullptr &&
2666           target->instance_descriptors() == descriptors) {
2667         descriptors_owner_died = true;
2668       }
2669     } else {
2670       if (i != transition_index) {
2671         Name* key = transitions->GetKey(i);
2672         transitions->SetKey(transition_index, key);
2673         Object** key_slot = transitions->GetKeySlot(transition_index);
2674         RecordSlot(transitions, key_slot, key);
2675         // Target slots do not need to be recorded since maps are not compacted.
2676         transitions->SetTarget(transition_index, transitions->GetTarget(i));
2677       }
2678       transition_index++;
2679     }
2680   }
2681   // If there are no transitions to be cleared, return.
2682   if (transition_index == num_transitions) {
2683     DCHECK(!descriptors_owner_died);
2684     return false;
2685   }
2686   // Note that we never eliminate a transition array, though we might right-trim
2687   // such that number_of_transitions() == 0. If this assumption changes,
2688   // TransitionArray::Insert() will need to deal with the case that a transition
2689   // array disappeared during GC.
2690   int trim = TransitionArray::Capacity(transitions) - transition_index;
2691   if (trim > 0) {
2692     heap_->RightTrimFixedArray(transitions,
2693                                trim * TransitionArray::kTransitionSize);
2694     transitions->SetNumberOfTransitions(transition_index);
2695   }
2696   return descriptors_owner_died;
2697 }
2698 
2699 
2700 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2701                                                DescriptorArray* descriptors) {
2702   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2703   if (number_of_own_descriptors == 0) {
2704     DCHECK(descriptors == heap_->empty_descriptor_array());
2705     return;
2706   }
2707 
2708   int number_of_descriptors = descriptors->number_of_descriptors_storage();
2709   int to_trim = number_of_descriptors - number_of_own_descriptors;
2710   if (to_trim > 0) {
2711     heap_->RightTrimFixedArray(descriptors,
2712                                to_trim * DescriptorArray::kEntrySize);
2713     descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2714 
2715     if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2716     descriptors->Sort();
2717 
2718     if (FLAG_unbox_double_fields) {
2719       LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2720       layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2721                                                   number_of_own_descriptors);
2722       SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2723     }
2724   }
2725   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2726   map->set_owns_descriptors(true);
2727 }
2728 
2729 
2730 void MarkCompactCollector::TrimEnumCache(Map* map,
2731                                          DescriptorArray* descriptors) {
2732   int live_enum = map->EnumLength();
2733   if (live_enum == kInvalidEnumCacheSentinel) {
2734     live_enum =
2735         map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
2736   }
2737   if (live_enum == 0) return descriptors->ClearEnumCache();
2738 
2739   FixedArray* enum_cache = descriptors->GetEnumCache();
2740 
2741   int to_trim = enum_cache->length() - live_enum;
2742   if (to_trim <= 0) return;
2743   heap_->RightTrimFixedArray(descriptors->GetEnumCache(), to_trim);
2744 
2745   if (!descriptors->HasEnumIndicesCache()) return;
2746   FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2747   heap_->RightTrimFixedArray(enum_indices_cache, to_trim);
2748 }
2749 
2750 
2751 void MarkCompactCollector::ProcessWeakCollections() {
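  // Ephemeron semantics: for each table entry, the value is only marked (and
  // the key slot recorded) if the corresponding key is already marked.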
2752   Object* weak_collection_obj = heap()->encountered_weak_collections();
2753   while (weak_collection_obj != Smi::kZero) {
2754     JSWeakCollection* weak_collection =
2755         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2756     DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
2757     if (weak_collection->table()->IsHashTable()) {
2758       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2759       for (int i = 0; i < table->Capacity(); i++) {
2760         if (ObjectMarking::IsBlackOrGrey(HeapObject::cast(table->KeyAt(i)))) {
2761           Object** key_slot =
2762               table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2763           RecordSlot(table, key_slot, *key_slot);
2764           Object** value_slot =
2765               table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2766           MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
2767                                                          value_slot);
2768         }
2769       }
2770     }
2771     weak_collection_obj = weak_collection->next();
2772   }
2773 }
2774 
2775 
2776 void MarkCompactCollector::ClearWeakCollections() {
2777   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2778   Object* weak_collection_obj = heap()->encountered_weak_collections();
2779   while (weak_collection_obj != Smi::kZero) {
2780     JSWeakCollection* weak_collection =
2781         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2782     DCHECK(ObjectMarking::IsBlackOrGrey(weak_collection));
2783     if (weak_collection->table()->IsHashTable()) {
2784       ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2785       for (int i = 0; i < table->Capacity(); i++) {
2786         HeapObject* key = HeapObject::cast(table->KeyAt(i));
2787         if (!ObjectMarking::IsBlackOrGrey(key)) {
2788           table->RemoveEntry(i);
2789         }
2790       }
2791     }
2792     weak_collection_obj = weak_collection->next();
2793     weak_collection->set_next(heap()->undefined_value());
2794   }
2795   heap()->set_encountered_weak_collections(Smi::kZero);
2796 }
2797 
2798 
2799 void MarkCompactCollector::AbortWeakCollections() {
2800   Object* weak_collection_obj = heap()->encountered_weak_collections();
2801   while (weak_collection_obj != Smi::kZero) {
2802     JSWeakCollection* weak_collection =
2803         reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2804     weak_collection_obj = weak_collection->next();
2805     weak_collection->set_next(heap()->undefined_value());
2806   }
2807   heap()->set_encountered_weak_collections(Smi::kZero);
2808 }
2809 
2810 
2811 void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2812                                           DependentCode** dependent_code_list) {
2813   Heap* heap = this->heap();
2814   TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2815   Object* weak_cell_obj = heap->encountered_weak_cells();
2816   Object* the_hole_value = heap->the_hole_value();
2817   DependentCode* dependent_code_head =
2818       DependentCode::cast(heap->empty_fixed_array());
2819   Object* non_live_map_head = Smi::kZero;
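  // For every weak cell on the list: if its value is live, keep the cell and
  // record the value slot; if its value is a dead map, queue the cell on the
  // non-live-map list and the map's weak dependent code on the dependent-code
  // list; otherwise clear the cell (with a special case below that resurrects
  // Cells whose own value is still live).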
2820   while (weak_cell_obj != Smi::kZero) {
2821     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2822     Object* next_weak_cell = weak_cell->next();
2823     bool clear_value = true;
2824     bool clear_next = true;
2825     // We do not insert cleared weak cells into the list, so the value
2826     // cannot be a Smi here.
2827     HeapObject* value = HeapObject::cast(weak_cell->value());
2828     if (!ObjectMarking::IsBlackOrGrey(value)) {
2829       // Cells for new-space objects embedded in optimized code are wrapped in
2830       // WeakCell and put into Heap::weak_object_to_code_table.
2831       // Such cells do not have any strong references but we want to keep them
2832       // alive as long as the cell value is alive.
2833       // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2834       if (value->IsCell()) {
2835         Object* cell_value = Cell::cast(value)->value();
2836         if (cell_value->IsHeapObject() &&
2837             ObjectMarking::IsBlackOrGrey(HeapObject::cast(cell_value))) {
2838           // Resurrect the cell.
2839           ObjectMarking::WhiteToBlack(value);
2840           Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2841           RecordSlot(value, slot, *slot);
2842           slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2843           RecordSlot(weak_cell, slot, *slot);
2844           clear_value = false;
2845         }
2846       }
2847       if (value->IsMap()) {
2848         // The map is non-live.
2849         Map* map = Map::cast(value);
2850         // Add dependent code to the dependent_code_list.
2851         DependentCode* candidate = map->dependent_code();
2852         // We rely on the fact that the weak code group comes first.
2853         STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2854         if (candidate->length() > 0 &&
2855             candidate->group() == DependentCode::kWeakCodeGroup) {
2856           candidate->set_next_link(dependent_code_head);
2857           dependent_code_head = candidate;
2858         }
2859         // Add the weak cell to the non_live_map list.
2860         weak_cell->set_next(non_live_map_head);
2861         non_live_map_head = weak_cell;
2862         clear_value = false;
2863         clear_next = false;
2864       }
2865     } else {
2866       // The value of the weak cell is alive.
2867       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2868       RecordSlot(weak_cell, slot, *slot);
2869       clear_value = false;
2870     }
2871     if (clear_value) {
2872       weak_cell->clear();
2873     }
2874     if (clear_next) {
2875       weak_cell->clear_next(the_hole_value);
2876     }
2877     weak_cell_obj = next_weak_cell;
2878   }
2879   heap->set_encountered_weak_cells(Smi::kZero);
2880   *non_live_map_list = non_live_map_head;
2881   *dependent_code_list = dependent_code_head;
2882 }
2883 
2884 
2885 void MarkCompactCollector::AbortWeakCells() {
2886   Object* the_hole_value = heap()->the_hole_value();
2887   Object* weak_cell_obj = heap()->encountered_weak_cells();
2888   while (weak_cell_obj != Smi::kZero) {
2889     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2890     weak_cell_obj = weak_cell->next();
2891     weak_cell->clear_next(the_hole_value);
2892   }
2893   heap()->set_encountered_weak_cells(Smi::kZero);
2894 }
2895 
2896 
2897 void MarkCompactCollector::AbortTransitionArrays() {
2898   HeapObject* undefined = heap()->undefined_value();
2899   Object* obj = heap()->encountered_transition_arrays();
2900   while (obj != Smi::kZero) {
2901     TransitionArray* array = TransitionArray::cast(obj);
2902     obj = array->next_link();
2903     array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2904   }
2905   heap()->set_encountered_transition_arrays(Smi::kZero);
2906 }
2907 
2908 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2909                                            Object* target) {
2910   Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2911   Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2912   if (target_page->IsEvacuationCandidate() &&
2913       (rinfo->host() == NULL ||
2914        !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2915     RelocInfo::Mode rmode = rinfo->rmode();
2916     Address addr = rinfo->pc();
2917     SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2918     if (rinfo->IsInConstantPool()) {
2919       addr = rinfo->constant_pool_entry_address();
2920       if (RelocInfo::IsCodeTarget(rmode)) {
2921         slot_type = CODE_ENTRY_SLOT;
2922       } else {
2923         DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2924         slot_type = OBJECT_SLOT;
2925       }
2926     }
2927     RememberedSet<OLD_TO_OLD>::InsertTyped(
2928         source_page, reinterpret_cast<Address>(host), slot_type, addr);
2929   }
2930 }
2931 
2932 static inline SlotCallbackResult UpdateSlot(Object** slot) {
2933   Object* obj = reinterpret_cast<Object*>(
2934       base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
2935 
2936   if (obj->IsHeapObject()) {
2937     HeapObject* heap_obj = HeapObject::cast(obj);
2938     MapWord map_word = heap_obj->map_word();
2939     if (map_word.IsForwardingAddress()) {
2940       DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
2941              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2942              Page::FromAddress(heap_obj->address())
2943                  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2944       HeapObject* target = map_word.ToForwardingAddress();
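      // Only install the forwarding address if the slot still holds |obj|;
      // the compare-and-swap leaves the slot untouched if it was modified
      // concurrently.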
2945       base::NoBarrier_CompareAndSwap(
2946           reinterpret_cast<base::AtomicWord*>(slot),
2947           reinterpret_cast<base::AtomicWord>(obj),
2948           reinterpret_cast<base::AtomicWord>(target));
2949       DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
2950              !MarkCompactCollector::IsOnEvacuationCandidate(target));
2951     }
2952   }
2953   return REMOVE_SLOT;
2954 }
2955 
2956 // Visitor for updating pointers from live objects in old spaces to new space.
2957 // It does not expect to encounter pointers to dead objects.
2958 class PointersUpdatingVisitor : public ObjectVisitor {
2959  public:
2960   void VisitPointer(Object** p) override { UpdateSlot(p); }
2961 
2962   void VisitPointers(Object** start, Object** end) override {
2963     for (Object** p = start; p < end; p++) UpdateSlot(p);
2964   }
2965 
2966   void VisitCell(RelocInfo* rinfo) override {
2967     UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
2968   }
2969 
2970   void VisitEmbeddedPointer(RelocInfo* rinfo) override {
2971     UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
2972   }
2973 
2974   void VisitCodeTarget(RelocInfo* rinfo) override {
2975     UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
2976   }
2977 
2978   void VisitCodeEntry(Address entry_address) override {
2979     UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
2980   }
2981 
2982   void VisitDebugTarget(RelocInfo* rinfo) override {
2983     UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
2984   }
2985 };
2986 
2987 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2988                                                          Object** p) {
2989   MapWord map_word = HeapObject::cast(*p)->map_word();
2990 
2991   if (map_word.IsForwardingAddress()) {
2992     return String::cast(map_word.ToForwardingAddress());
2993   }
2994 
2995   return String::cast(*p);
2996 }
2997 
2998 void MarkCompactCollector::EvacuatePrologue() {
2999   // New space.
3000   NewSpace* new_space = heap()->new_space();
3001   // Append the list of new space pages to be processed.
3002   for (Page* p : PageRange(new_space->bottom(), new_space->top())) {
3003     new_space_evacuation_pages_.Add(p);
3004   }
3005   new_space->Flip();
3006   new_space->ResetAllocationInfo();
3007 
3008   // Old space.
3009   CHECK(old_space_evacuation_pages_.is_empty());
3010   old_space_evacuation_pages_.Swap(&evacuation_candidates_);
3011 }
3012 
3013 void MarkCompactCollector::EvacuateEpilogue() {
3014   // New space.
3015   heap()->new_space()->set_age_mark(heap()->new_space()->top());
3016   // Old space. Deallocate evacuated candidate pages.
3017   ReleaseEvacuationCandidates();
3018 }
3019 
3020 class MarkCompactCollector::Evacuator : public Malloced {
3021  public:
3022   enum EvacuationMode {
3023     kObjectsNewToOld,
3024     kPageNewToOld,
3025     kObjectsOldToOld,
3026     kPageNewToNew,
3027   };
3028 
3029   static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3030     // Note: The order of checks is important in this function.
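    // The page-promotion flags are checked before the InNewSpace() test so
    // that flagged pages are handled page-wise rather than object-wise.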
3031     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3032       return kPageNewToOld;
3033     if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3034       return kPageNewToNew;
3035     if (chunk->InNewSpace()) return kObjectsNewToOld;
3036     DCHECK(chunk->IsEvacuationCandidate());
3037     return kObjectsOldToOld;
3038   }
3039 
3040   // NewSpacePages with more live bytes than this threshold qualify for fast
3041   // evacuation.
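  // E.g. with a page promotion threshold of 70 a page qualifies once more
  // than 70% of its allocatable area is live. With page promotion disabled
  // the returned threshold exceeds the page size, so no page ever qualifies.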
3042   static int PageEvacuationThreshold() {
3043     if (FLAG_page_promotion)
3044       return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3045     return Page::kAllocatableMemory + kPointerSize;
3046   }
3047 
3048   explicit Evacuator(MarkCompactCollector* collector)
3049       : collector_(collector),
3050         compaction_spaces_(collector->heap()),
3051         local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
3052         new_space_visitor_(collector->heap(), &compaction_spaces_,
3053                            &local_pretenuring_feedback_),
3054         new_to_new_page_visitor_(collector->heap(),
3055                                  &local_pretenuring_feedback_),
3056         new_to_old_page_visitor_(collector->heap(),
3057                                  &local_pretenuring_feedback_),
3058 
3059         old_space_visitor_(collector->heap(), &compaction_spaces_),
3060         duration_(0.0),
3061         bytes_compacted_(0) {}
3062 
3063   inline bool EvacuatePage(Page* chunk);
3064 
3065   // Merge back locally cached info sequentially. Note that this method needs
3066   // to be called from the main thread.
3067   inline void Finalize();
3068 
3069   CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3070 
3071  private:
3072   static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3073 
3074   inline Heap* heap() { return collector_->heap(); }
3075 
3076   void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3077     duration_ += duration;
3078     bytes_compacted_ += bytes_compacted;
3079   }
3080 
3081   MarkCompactCollector* collector_;
3082 
3083   // Locally cached collector data.
3084   CompactionSpaceCollection compaction_spaces_;
3085   base::HashMap local_pretenuring_feedback_;
3086 
3087   // Visitors for the corresponding spaces.
3088   EvacuateNewSpaceVisitor new_space_visitor_;
3089   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
3090       new_to_new_page_visitor_;
3091   EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
3092       new_to_old_page_visitor_;
3093   EvacuateOldSpaceVisitor old_space_visitor_;
3094 
3095   // Bookkeeping info.
3096   double duration_;
3097   intptr_t bytes_compacted_;
3098 };
3099 
3100 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3101   bool success = false;
3102   DCHECK(page->SweepingDone());
3103   int saved_live_bytes = page->LiveBytes();
3104   double evacuation_time = 0.0;
3105   Heap* heap = page->heap();
3106   {
3107     AlwaysAllocateScope always_allocate(heap->isolate());
3108     TimedScope timed_scope(&evacuation_time);
3109     switch (ComputeEvacuationMode(page)) {
3110       case kObjectsNewToOld:
3111         success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3112                                                kClearMarkbits);
3113         DCHECK(success);
3114         ArrayBufferTracker::ProcessBuffers(
3115             page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3116         break;
3117       case kPageNewToOld:
3118         success = collector_->VisitLiveObjects(page, &new_to_old_page_visitor_,
3119                                                kKeepMarking);
3120         DCHECK(success);
3121         new_to_old_page_visitor_.account_moved_bytes(page->LiveBytes());
3122         // ArrayBufferTracker will be updated during sweeping.
3123         break;
3124       case kPageNewToNew:
3125         success = collector_->VisitLiveObjects(page, &new_to_new_page_visitor_,
3126                                                kKeepMarking);
3127         DCHECK(success);
3128         new_to_new_page_visitor_.account_moved_bytes(page->LiveBytes());
3129         // ArrayBufferTracker will be updated during sweeping.
3130         break;
3131       case kObjectsOldToOld:
3132         success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3133                                                kClearMarkbits);
3134         if (!success) {
3135           // Aborted compaction page. We have to record slots here, since we
3136           // might not have recorded them in the first place.
3137           // Note: We mark the page as aborted here to be able to record slots
3138           // for code objects in |RecordMigratedSlotVisitor|.
3139           page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3140           EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3141           success =
3142               collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3143           ArrayBufferTracker::ProcessBuffers(
3144               page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3145           DCHECK(success);
3146           // We need to return failure here to indicate that we want this page
3147           // added to the sweeper.
3148           success = false;
3149         } else {
3150           ArrayBufferTracker::ProcessBuffers(
3151               page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3152         }
3153         break;
3154     }
3155   }
3156   ReportCompactionProgress(evacuation_time, saved_live_bytes);
3157   if (FLAG_trace_evacuation) {
3158     PrintIsolate(heap->isolate(),
3159                  "evacuation[%p]: page=%p new_space=%d "
3160                  "page_evacuation=%d executable=%d contains_age_mark=%d "
3161                  "live_bytes=%d time=%f\n",
3162                  static_cast<void*>(this), static_cast<void*>(page),
3163                  page->InNewSpace(),
3164                  page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3165                      page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3166                  page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3167                  page->Contains(heap->new_space()->age_mark()),
3168                  saved_live_bytes, evacuation_time);
3169   }
3170   return success;
3171 }
3172 
3173 void MarkCompactCollector::Evacuator::Finalize() {
3174   heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3175   heap()->code_space()->MergeCompactionSpace(
3176       compaction_spaces_.Get(CODE_SPACE));
3177   heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3178   heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3179                                        new_to_old_page_visitor_.moved_bytes());
3180   heap()->IncrementSemiSpaceCopiedObjectSize(
3181       new_space_visitor_.semispace_copied_size() +
3182       new_to_new_page_visitor_.moved_bytes());
3183   heap()->IncrementYoungSurvivorsCounter(
3184       new_space_visitor_.promoted_size() +
3185       new_space_visitor_.semispace_copied_size() +
3186       new_to_old_page_visitor_.moved_bytes() +
3187       new_to_new_page_visitor_.moved_bytes());
3188   heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3189 }
3190 
3191 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3192                                                           intptr_t live_bytes) {
3193   if (!FLAG_parallel_compaction) return 1;
3194   // Compute the number of needed tasks based on a target compaction time, the
3195   // profiled compaction speed and marked live memory.
3196   //
3197   // The number of parallel compaction tasks is limited by:
3198   // - #evacuation pages
3199   // - #cores
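  // E.g. 8 MB of live bytes at a measured speed of 4 MB/ms yields
  // 1 + 8 / 4 / 0.5 = 5 tasks, which is then capped by the number of pages
  // and the number of available cores below.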
3200   const double kTargetCompactionTimeInMs = .5;
3201 
3202   double compaction_speed =
3203       heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3204 
3205   const int available_cores = Max(
3206       1, static_cast<int>(
3207              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
3208   int tasks;
3209   if (compaction_speed > 0) {
3210     tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
3211                                  kTargetCompactionTimeInMs);
3212   } else {
3213     tasks = pages;
3214   }
3215   const int tasks_capped_pages = Min(pages, tasks);
3216   return Min(available_cores, tasks_capped_pages);
3217 }
3218 
3219 class EvacuationJobTraits {
3220  public:
3221   typedef int* PerPageData;  // Pointer to number of aborted pages.
3222   typedef MarkCompactCollector::Evacuator* PerTaskData;
3223 
3224   static const bool NeedSequentialFinalization = true;
3225 
3226   static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3227                                     MemoryChunk* chunk, PerPageData) {
3228     return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3229   }
3230 
3231   static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3232                                        bool success, PerPageData data) {
3233     using Evacuator = MarkCompactCollector::Evacuator;
3234     Page* p = static_cast<Page*>(chunk);
3235     switch (Evacuator::ComputeEvacuationMode(p)) {
3236       case Evacuator::kPageNewToOld:
3237         break;
3238       case Evacuator::kPageNewToNew:
3239         DCHECK(success);
3240         break;
3241       case Evacuator::kObjectsNewToOld:
3242         DCHECK(success);
3243         break;
3244       case Evacuator::kObjectsOldToOld:
3245         if (success) {
3246           DCHECK(p->IsEvacuationCandidate());
3247           DCHECK(p->SweepingDone());
3248           p->Unlink();
3249         } else {
3250           // We have partially compacted the page, i.e., some objects may have
3251           // moved, others are still in place.
3252           p->ClearEvacuationCandidate();
3253           // Slots have already been recorded, so we just need to add the page
3254           // to the sweeper, which will happen after updating pointers.
3255           *data += 1;
3256         }
3257         break;
3258       default:
3259         UNREACHABLE();
3260     }
3261   }
3262 };
3263 
3264 void MarkCompactCollector::EvacuatePagesInParallel() {
3265   PageParallelJob<EvacuationJobTraits> job(
3266       heap_, heap_->isolate()->cancelable_task_manager(),
3267       &page_parallel_job_semaphore_);
3268 
3269   int abandoned_pages = 0;
3270   intptr_t live_bytes = 0;
3271   for (Page* page : old_space_evacuation_pages_) {
3272     live_bytes += page->LiveBytes();
3273     job.AddPage(page, &abandoned_pages);
3274   }
3275 
3276   const bool reduce_memory = heap()->ShouldReduceMemory();
3277   const Address age_mark = heap()->new_space()->age_mark();
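  // New-space pages above the evacuation threshold that do not contain the
  // age mark may be promoted as a whole: pages entirely below the age mark
  // move to old space, the remaining qualifying pages stay in new space
  // (new->new promotion).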
3278   for (Page* page : new_space_evacuation_pages_) {
3279     live_bytes += page->LiveBytes();
3280     if (!reduce_memory && !page->NeverEvacuate() &&
3281         (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3282         !page->Contains(age_mark) &&
3283         heap()->CanExpandOldGeneration(page->LiveBytes())) {
3284       if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3285         EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3286       } else {
3287         EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
3288       }
3289     }
3290 
3291     job.AddPage(page, &abandoned_pages);
3292   }
3293   DCHECK_GE(job.NumberOfPages(), 1);
3294 
3295   // Used for trace summary.
3296   double compaction_speed = 0;
3297   if (FLAG_trace_evacuation) {
3298     compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3299   }
3300 
3301   const int wanted_num_tasks =
3302       NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
3303   Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
3304   for (int i = 0; i < wanted_num_tasks; i++) {
3305     evacuators[i] = new Evacuator(this);
3306   }
3307   job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
3308   for (int i = 0; i < wanted_num_tasks; i++) {
3309     evacuators[i]->Finalize();
3310     delete evacuators[i];
3311   }
3312   delete[] evacuators;
3313 
3314   if (FLAG_trace_evacuation) {
3315     PrintIsolate(isolate(),
3316                  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
3317                  "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3318                  " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3319                  isolate()->time_millis_since_init(),
3320                  FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
3321                  abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
3322                  V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3323                  live_bytes, compaction_speed);
3324   }
3325 }
3326 
3327 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3328  public:
3329   virtual Object* RetainAs(Object* object) {
3330     if (object->IsHeapObject()) {
3331       HeapObject* heap_object = HeapObject::cast(object);
3332       MapWord map_word = heap_object->map_word();
3333       if (map_word.IsForwardingAddress()) {
3334         return map_word.ToForwardingAddress();
3335       }
3336     }
3337     return object;
3338   }
3339 };
3340 
3341 MarkCompactCollector::Sweeper::ClearOldToNewSlotsMode
3342 MarkCompactCollector::Sweeper::GetClearOldToNewSlotsMode(Page* p) {
3343   AllocationSpace identity = p->owner()->identity();
3344   if (p->old_to_new_slots() &&
3345       (identity == OLD_SPACE || identity == MAP_SPACE)) {
3346     return MarkCompactCollector::Sweeper::CLEAR_REGULAR_SLOTS;
3347   } else if (p->typed_old_to_new_slots() && identity == CODE_SPACE) {
3348     return MarkCompactCollector::Sweeper::CLEAR_TYPED_SLOTS;
3349   }
3350   return MarkCompactCollector::Sweeper::DO_NOT_CLEAR;
3351 }
3352 
3353 int MarkCompactCollector::Sweeper::RawSweep(
3354     Page* p, FreeListRebuildingMode free_list_mode,
3355     FreeSpaceTreatmentMode free_space_mode) {
3356   Space* space = p->owner();
3357   DCHECK_NOT_NULL(space);
3358   DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
3359          space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
3360   DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3361 
3362   // If there are old-to-new slots in that page, we have to filter out slots
3363   // that are in dead memory which is freed by the sweeper.
3364   ClearOldToNewSlotsMode slots_clearing_mode = GetClearOldToNewSlotsMode(p);
3365 
3366   // The free ranges map is used for filtering typed slots.
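  // Each entry maps the page-relative start offset of a freed region to its
  // end offset; typed old-to-new slots falling into any of these regions are
  // removed at the end of this function.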
3367   std::map<uint32_t, uint32_t> free_ranges;
3368 
3369   // Before we sweep objects on the page, we free dead array buffers which
3370   // requires valid mark bits.
3371   ArrayBufferTracker::FreeDead(p);
3372 
3373   Address free_start = p->area_start();
3374   DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3375 
3376   // If we use the skip list for code space pages, we have to lock the skip
3377   // list because it could be accessed concurrently by the runtime or the
3378   // deoptimizer.
3379   const bool rebuild_skip_list =
3380       space->identity() == CODE_SPACE && p->skip_list() != nullptr;
3381   SkipList* skip_list = p->skip_list();
3382   if (rebuild_skip_list) {
3383     skip_list->Clear();
3384   }
3385 
3386   intptr_t freed_bytes = 0;
3387   intptr_t max_freed_bytes = 0;
3388   int curr_region = -1;
3389 
3390   LiveObjectIterator<kBlackObjects> it(p);
3391   HeapObject* object = NULL;
3392 
3393   while ((object = it.Next()) != NULL) {
3394     DCHECK(ObjectMarking::IsBlack(object));
3395     Address free_end = object->address();
3396     if (free_end != free_start) {
3397       CHECK_GT(free_end, free_start);
3398       size_t size = static_cast<size_t>(free_end - free_start);
3399       if (free_space_mode == ZAP_FREE_SPACE) {
3400         memset(free_start, 0xcc, size);
3401       }
3402       if (free_list_mode == REBUILD_FREE_LIST) {
3403         freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
3404             free_start, size);
3405         max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3406       } else {
3407         p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3408                                         ClearRecordedSlots::kNo);
3409       }
3410 
3411       if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
3412         RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
3413                                                SlotSet::KEEP_EMPTY_BUCKETS);
3414       } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3415         free_ranges.insert(std::pair<uint32_t, uint32_t>(
3416             static_cast<uint32_t>(free_start - p->address()),
3417             static_cast<uint32_t>(free_end - p->address())));
3418       }
3419     }
3420     Map* map = object->synchronized_map();
3421     int size = object->SizeFromMap(map);
3422     if (rebuild_skip_list) {
3423       int new_region_start = SkipList::RegionNumber(free_end);
3424       int new_region_end =
3425           SkipList::RegionNumber(free_end + size - kPointerSize);
3426       if (new_region_start != curr_region || new_region_end != curr_region) {
3427         skip_list->AddObject(free_end, size);
3428         curr_region = new_region_end;
3429       }
3430     }
3431     free_start = free_end + size;
3432   }
3433 
3434   if (free_start != p->area_end()) {
3435     CHECK_GT(p->area_end(), free_start);
3436     size_t size = static_cast<size_t>(p->area_end() - free_start);
3437     if (free_space_mode == ZAP_FREE_SPACE) {
3438       memset(free_start, 0xcc, size);
3439     }
3440     if (free_list_mode == REBUILD_FREE_LIST) {
3441       freed_bytes = reinterpret_cast<PagedSpace*>(space)->UnaccountedFree(
3442           free_start, size);
3443       max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3444     } else {
3445       p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3446                                       ClearRecordedSlots::kNo);
3447     }
3448 
3449     if (slots_clearing_mode == CLEAR_REGULAR_SLOTS) {
3450       RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
3451                                              SlotSet::KEEP_EMPTY_BUCKETS);
3452     } else if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3453       free_ranges.insert(std::pair<uint32_t, uint32_t>(
3454           static_cast<uint32_t>(free_start - p->address()),
3455           static_cast<uint32_t>(p->area_end() - p->address())));
3456     }
3457   }
3458 
3459   // Clear invalid typed slots after collecting all free ranges.
3460   if (slots_clearing_mode == CLEAR_TYPED_SLOTS) {
3461     p->typed_old_to_new_slots()->RemoveInvaldSlots(free_ranges);
3462   }
3463 
3464   // Clear the mark bits of that page and reset live bytes count.
3465   p->ClearLiveness();
3466 
3467   p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3468   if (free_list_mode == IGNORE_FREE_LIST) return 0;
3469   return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
3470 }
3471 
3472 void MarkCompactCollector::InvalidateCode(Code* code) {
3473   Page* page = Page::FromAddress(code->address());
3474   Address start = code->instruction_start();
3475   Address end = code->address() + code->Size();
3476 
3477   RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
3478 
3479   if (heap_->incremental_marking()->IsCompacting() &&
3480       !ShouldSkipEvacuationSlotRecording(code)) {
3481     DCHECK(compacting_);
3482 
3483     // If the object is white, then no slots were recorded on it yet.
3484     if (ObjectMarking::IsWhite(code)) return;
3485 
3486     // Ignore all slots that might have been recorded in the body of the
3487     // deoptimized code object. Assumption: no slots will be recorded for
3488     // this object after invalidating it.
3489     RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
3490   }
3491 }
3492 
3493 
3494 // Return true if the given code is deoptimized or will be deoptimized.
3495 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3496   return code->is_optimized_code() && code->marked_for_deoptimization();
3497 }
3498 
3499 
3500 #ifdef VERIFY_HEAP
3501 static void VerifyAllBlackObjects(MemoryChunk* page) {
3502   LiveObjectIterator<kAllLiveObjects> it(page);
3503   HeapObject* object = NULL;
3504   while ((object = it.Next()) != NULL) {
3505     CHECK(ObjectMarking::IsBlack(object));
3506   }
3507 }
3508 #endif  // VERIFY_HEAP
3509 
3510 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
3511   EvacuateRecordOnlyVisitor visitor(heap());
3512   VisitLiveObjects(page, &visitor, kKeepMarking);
3513 }
3514 
3515 template <class Visitor>
3516 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
3517                                             IterationMode mode) {
3518 #ifdef VERIFY_HEAP
3519   VerifyAllBlackObjects(page);
3520 #endif  // VERIFY_HEAP
3521 
3522   LiveObjectIterator<kBlackObjects> it(page);
3523   HeapObject* object = nullptr;
3524   while ((object = it.Next()) != nullptr) {
3525     DCHECK(ObjectMarking::IsBlack(object));
3526     if (!visitor->Visit(object)) {
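      // The visitor aborted on |object|. For kClearMarkbits, undo the
      // partially visited page: clear mark bits and old-to-new slots up to
      // |object| and recompute the live byte count.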
3527       if (mode == kClearMarkbits) {
3528         page->markbits()->ClearRange(
3529             page->AddressToMarkbitIndex(page->area_start()),
3530             page->AddressToMarkbitIndex(object->address()));
3531         if (page->old_to_new_slots() != nullptr) {
3532           page->old_to_new_slots()->RemoveRange(
3533               0, static_cast<int>(object->address() - page->address()),
3534               SlotSet::PREFREE_EMPTY_BUCKETS);
3535         }
3536         if (page->typed_old_to_new_slots() != nullptr) {
3537           RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3538                                                       object->address());
3539         }
3540         RecomputeLiveBytes(page);
3541       }
3542       return false;
3543     }
3544   }
3545   if (mode == kClearMarkbits) {
3546     page->ClearLiveness();
3547   }
3548   return true;
3549 }
3550 
3551 
3552 void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3553   LiveObjectIterator<kBlackObjects> it(page);
3554   int new_live_size = 0;
3555   HeapObject* object = nullptr;
3556   while ((object = it.Next()) != nullptr) {
3557     new_live_size += object->Size();
3558   }
3559   page->SetLiveBytes(new_live_size);
3560 }
3561 
3562 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
3563                                                      Page* page) {
3564   base::LockGuard<base::Mutex> guard(&mutex_);
3565   swept_list_[space->identity()].Add(page);
3566 }
3567 
3568 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3569   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3570   Heap::RelocationLock relocation_lock(heap());
3571 
3572   {
3573     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
3574     EvacuatePrologue();
3575   }
3576 
3577   {
3578     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3579     EvacuationScope evacuation_scope(this);
3580     EvacuatePagesInParallel();
3581   }
3582 
3583   UpdatePointersAfterEvacuation();
3584 
3585   {
3586     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
3587     if (!heap()->new_space()->Rebalance()) {
3588       FatalProcessOutOfMemory("NewSpace::Rebalance");
3589     }
3590   }
3591 
3592   // Give pages that are queued to be freed back to the OS. Note that filtering
3593   // slots only handles old space (for unboxed doubles), and thus map space can
3594   // still contain stale pointers. We only free the chunks after pointer updates
3595   // to still have access to page headers.
3596   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3597 
3598   {
3599     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3600 
3601     for (Page* p : new_space_evacuation_pages_) {
3602       if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3603         p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3604         sweeper().AddPage(p->owner()->identity(), p);
3605       } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3606         p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3607         p->ForAllFreeListCategories(
3608             [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3609         sweeper().AddPage(p->owner()->identity(), p);
3610       }
3611     }
3612     new_space_evacuation_pages_.Rewind(0);
3613 
3614     for (Page* p : old_space_evacuation_pages_) {
3615       // Important: the skip list must be cleared only after roots were updated,
3616       // because root iteration traverses the stack and might have to find code
3617       // objects via a not-yet-updated pc pointing into an evacuation candidate.
3618       SkipList* list = p->skip_list();
3619       if (list != NULL) list->Clear();
3620       if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3621         sweeper().AddPage(p->owner()->identity(), p);
3622         p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3623       }
3624     }
3625   }
3626 
3627   {
3628     TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
3629     EvacuateEpilogue();
3630   }
3631 
3632 #ifdef VERIFY_HEAP
3633   if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
3634     VerifyEvacuation(heap());
3635   }
3636 #endif
3637 }
3638 
3639 template <PointerDirection direction>
3640 class PointerUpdateJobTraits {
3641  public:
3642   typedef int PerPageData;  // Per page data is not used in this job.
3643   typedef int PerTaskData;  // Per task data is not used in this job.
3644 
3645   static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
3646                                     PerPageData) {
3647     UpdateUntypedPointers(heap, chunk);
3648     UpdateTypedPointers(heap, chunk);
3649     return true;
3650   }
3651   static const bool NeedSequentialFinalization = false;
3652   static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3653   }
3654 
3655  private:
3656   static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
3657     if (direction == OLD_TO_NEW) {
3658       RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap](Address slot) {
3659         return CheckAndUpdateOldToNewSlot(heap, slot);
3660       });
3661     } else {
3662       RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
3663         return UpdateSlot(reinterpret_cast<Object**>(slot));
3664       });
3665     }
3666   }
3667 
3668   static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
3669     if (direction == OLD_TO_OLD) {
3670       Isolate* isolate = heap->isolate();
3671       RememberedSet<OLD_TO_OLD>::IterateTyped(
3672           chunk, [isolate](SlotType type, Address host_addr, Address slot) {
3673             return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
3674                                                           UpdateSlot);
3675           });
3676     } else {
3677       Isolate* isolate = heap->isolate();
3678       RememberedSet<OLD_TO_NEW>::IterateTyped(
3679           chunk,
3680           [isolate, heap](SlotType type, Address host_addr, Address slot) {
3681             return UpdateTypedSlotHelper::UpdateTypedSlot(
3682                 isolate, type, slot, [heap](Object** slot) {
3683                   return CheckAndUpdateOldToNewSlot(
3684                       heap, reinterpret_cast<Address>(slot));
3685                 });
3686           });
3687     }
3688   }
3689 
3690   static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
3691                                                        Address slot_address) {
3692     // There may be concurrent action on slots in dead objects. Concurrent
3693     // sweeper threads may overwrite the slot content with a free space object.
3694     // Moreover, the pointed-to object may also get concurrently overwritten
3695     // with a free space object. The sweeper always gets priority performing
3696     // these writes.
3697     base::NoBarrierAtomicValue<Object*>* slot =
3698         base::NoBarrierAtomicValue<Object*>::FromAddress(slot_address);
3699     Object* slot_reference = slot->Value();
3700     if (heap->InFromSpace(slot_reference)) {
3701       HeapObject* heap_object = reinterpret_cast<HeapObject*>(slot_reference);
3702       DCHECK(heap_object->IsHeapObject());
3703       MapWord map_word = heap_object->map_word();
3704       // There could still be stale pointers in large object space, map space,
3705       // and old space for pages that have been promoted.
3706       if (map_word.IsForwardingAddress()) {
3707         // A sweeper thread may concurrently write a size value which looks like
3708         // a forwarding pointer. We have to ignore these values.
3709         if (map_word.ToRawValue() < Page::kPageSize) {
3710           return REMOVE_SLOT;
3711         }
3712         // Update the corresponding slot only if the slot content did not
3713         // change in the meantime. This may happen when a concurrent sweeper
3714         // thread stored a free space object at that memory location.
3715         slot->TrySetValue(slot_reference, map_word.ToForwardingAddress());
3716       }
3717       // If the object was in from space before the callback and is in to
3718       // space afterwards, the object is still live.
3719       // Unfortunately, we do not know anything about the slot itself: it
3720       // could be located in a just-freed free-space object.
3721       if (heap->InToSpace(slot->Value())) {
3722         return KEEP_SLOT;
3723       }
3724     } else if (heap->InToSpace(slot_reference)) {
3725       // Slots can point to "to" space if the page has been moved, or if the
3726       // slot has been recorded multiple times in the remembered set. Since
3727       // there is no forwarding information present we need to check the
3728       // markbits to determine liveness.
3729       if (ObjectMarking::IsBlack(reinterpret_cast<HeapObject*>(slot_reference)))
3730         return KEEP_SLOT;
3731     } else {
3732       DCHECK(!heap->InNewSpace(slot_reference));
3733     }
3734     return REMOVE_SLOT;
3735   }
3736 };
3737 
3738 int NumberOfPointerUpdateTasks(int pages) {
3739   if (!FLAG_parallel_pointer_update) return 1;
3740   const int available_cores = Max(
3741       1, static_cast<int>(
3742              V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()));
3743   const int kPagesPerTask = 4;
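  // Round up, e.g. 10 pages with kPagesPerTask == 4 yield (10 + 3) / 4 == 3
  // tasks before capping at the number of available cores.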
3744   return Min(available_cores, (pages + kPagesPerTask - 1) / kPagesPerTask);
3745 }
3746 
3747 template <PointerDirection direction>
3748 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3749   PageParallelJob<PointerUpdateJobTraits<direction> > job(
3750       heap, heap->isolate()->cancelable_task_manager(), semaphore);
3751   RememberedSet<direction>::IterateMemoryChunks(
3752       heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
3753   int num_pages = job.NumberOfPages();
3754   int num_tasks = NumberOfPointerUpdateTasks(num_pages);
3755   job.Run(num_tasks, [](int i) { return 0; });
3756 }
3757 
3758 class ToSpacePointerUpdateJobTraits {
3759  public:
3760   typedef std::pair<Address, Address> PerPageData;
3761   typedef PointersUpdatingVisitor* PerTaskData;
3762 
3763   static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3764                                     MemoryChunk* chunk, PerPageData limits) {
3765     if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3766       // New->new promoted pages contain garbage so they require iteration
3767       // using markbits.
3768       ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
3769     } else {
3770       ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
3771     }
3772     return true;
3773   }
3774 
3775   static const bool NeedSequentialFinalization = false;
3776   static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3777   }
3778 
3779  private:
3780   static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
3781                                             MemoryChunk* chunk,
3782                                             PerPageData limits) {
3783     for (Address cur = limits.first; cur < limits.second;) {
3784       HeapObject* object = HeapObject::FromAddress(cur);
3785       Map* map = object->map();
3786       int size = object->SizeFromMap(map);
3787       object->IterateBody(map->instance_type(), size, visitor);
3788       cur += size;
3789     }
3790   }
3791 
3792   static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
3793                                              MemoryChunk* chunk,
3794                                              PerPageData limits) {
3795     LiveObjectIterator<kBlackObjects> it(chunk);
3796     HeapObject* object = NULL;
3797     while ((object = it.Next()) != NULL) {
3798       Map* map = object->map();
3799       int size = object->SizeFromMap(map);
3800       object->IterateBody(map->instance_type(), size, visitor);
3801     }
3802   }
3803 };
3804 
3805 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3806   PageParallelJob<ToSpacePointerUpdateJobTraits> job(
3807       heap, heap->isolate()->cancelable_task_manager(), semaphore);
3808   Address space_start = heap->new_space()->bottom();
3809   Address space_end = heap->new_space()->top();
3810   for (Page* page : PageRange(space_start, space_end)) {
3811     Address start =
3812         page->Contains(space_start) ? space_start : page->area_start();
3813     Address end = page->Contains(space_end) ? space_end : page->area_end();
3814     job.AddPage(page, std::make_pair(start, end));
3815   }
3816   PointersUpdatingVisitor visitor;
3817   int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
3818   job.Run(num_tasks, [&visitor](int i) { return &visitor; });
3819 }
3820 
3821 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3822   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3823 
3824   PointersUpdatingVisitor updating_visitor;
3825 
3826   {
3827     TRACE_GC(heap()->tracer(),
3828              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
3829     UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
3830     // Update roots.
3831     heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3832     UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
3833   }
3834 
3835   {
3836     Heap* heap = this->heap();
3837     TRACE_GC(heap->tracer(),
3838              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3839     UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
3840   }
3841 
3842   {
3843     TRACE_GC(heap()->tracer(),
3844              GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3845     // Update pointers from external string table.
3846     heap_->UpdateReferencesInExternalStringTable(
3847         &UpdateReferenceInExternalStringTableEntry);
3848 
3849     EvacuationWeakObjectRetainer evacuation_object_retainer;
3850     heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3851   }
3852 }
3853 
3854 
3855 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3856   for (Page* p : old_space_evacuation_pages_) {
3857     if (!p->IsEvacuationCandidate()) continue;
3858     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3859     p->ResetLiveBytes();
3860     CHECK(p->SweepingDone());
3861     space->ReleasePage(p);
3862   }
3863   old_space_evacuation_pages_.Rewind(0);
3864   compacting_ = false;
3865   heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3866 }
3867 
3868 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
3869                                                       int required_freed_bytes,
3870                                                       int max_pages) {
3871   int max_freed = 0;
3872   int pages_freed = 0;
3873   Page* page = nullptr;
3874   while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3875     int freed = ParallelSweepPage(page, identity);
3876     pages_freed += 1;
3877     DCHECK_GE(freed, 0);
3878     max_freed = Max(max_freed, freed);
3879     if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
3880       return max_freed;
3881     if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
3882   }
3883   return max_freed;
3884 }
3885 
3886 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3887                                                      AllocationSpace identity) {
3888   int max_freed = 0;
3889   {
3890     base::LockGuard<base::Mutex> guard(page->mutex());
3891     // If this page was already swept in the meantime, we can return here.
3892     if (page->SweepingDone()) return 0;
3893     DCHECK_EQ(Page::kSweepingPending,
3894               page->concurrent_sweeping_state().Value());
3895     page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3896     const Sweeper::FreeSpaceTreatmentMode free_space_mode =
3897         Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
3898     if (identity == NEW_SPACE) {
3899       RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
3900     } else {
3901       max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
3902     }
3903     DCHECK(page->SweepingDone());
3904 
3905     // After finishing sweeping of a page we clean up its remembered set.
3906     if (page->typed_old_to_new_slots()) {
3907       page->typed_old_to_new_slots()->FreeToBeFreedChunks();
3908     }
3909     if (page->old_to_new_slots()) {
3910       page->old_to_new_slots()->FreeToBeFreedBuckets();
3911     }
3912   }
3913 
3914   {
3915     base::LockGuard<base::Mutex> guard(&mutex_);
3916     swept_list_[identity].Add(page);
3917   }
3918   return max_freed;
3919 }
3920 
3921 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3922   DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
3923   PrepareToBeSweptPage(space, page);
3924   sweeping_list_[space].push_back(page);
3925 }
3926 
3927 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
3928                                                          Page* page) {
3929   page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3930   DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes()));
3931   size_t to_sweep = page->area_size() - page->LiveBytes();
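  // E.g. a page with a 500 KB usable area and 120 KB of live bytes shrinks
  // the space's accounting stats by 380 KB; sweeping later returns the
  // actually reclaimed memory to the free list.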
3932   if (space != NEW_SPACE)
3933     heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
3934 }
3935 
3936 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3937     AllocationSpace space) {
3938   base::LockGuard<base::Mutex> guard(&mutex_);
3939   Page* page = nullptr;
3940   if (!sweeping_list_[space].empty()) {
3941     page = sweeping_list_[space].front();
3942     sweeping_list_[space].pop_front();
3943   }
3944   return page;
3945 }
3946 
3947 void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
3948                                                         Page* page) {
3949   base::LockGuard<base::Mutex> guard(&mutex_);
3950   sweeping_list_[space].push_back(page);
3951 }
3952 
3953 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3954   space->ClearStats();
3955 
3956   int will_be_swept = 0;
3957   bool unused_page_present = false;
3958 
3959   // Loop needs to support deletion if live bytes == 0 for a page.
3960   for (auto it = space->begin(); it != space->end();) {
3961     Page* p = *(it++);
3962     DCHECK(p->SweepingDone());
3963 
3964     if (p->IsEvacuationCandidate()) {
3965       // Will be processed in EvacuateNewSpaceAndCandidates.
3966       DCHECK(evacuation_candidates_.length() > 0);
3967       continue;
3968     }
3969 
3970     if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3971       // We need to sweep the page to get it into an iterable state again. Note
3972       // that this adds unusable memory to the free list, which is later dropped
3973       // from the free list again. Since we only use the flag for testing, this
3974       // is fine.
3975       p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3976       Sweeper::RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3977                         Heap::ShouldZapGarbage() ? Sweeper::ZAP_FREE_SPACE
3978                                                  : Sweeper::IGNORE_FREE_SPACE);
3979       continue;
3980     }
3981 
3982     // One unused page is kept, all further are released before sweeping them.
3983     if (p->LiveBytes() == 0) {
3984       if (unused_page_present) {
3985         if (FLAG_gc_verbose) {
3986           PrintIsolate(isolate(), "sweeping: released page: %p",
3987                        static_cast<void*>(p));
3988         }
3989         ArrayBufferTracker::FreeAll(p);
3990         space->ReleasePage(p);
3991         continue;
3992       }
3993       unused_page_present = true;
3994     }
3995 
3996     sweeper().AddPage(space->identity(), p);
3997     will_be_swept++;
3998   }
3999 
4000   if (FLAG_gc_verbose) {
4001     PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
4002                  AllocationSpaceName(space->identity()), will_be_swept);
4003   }
4004 }
4005 
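// Initializes sweeping of the old, code, and map spaces under their
// respective tracer scopes, starts the sweeper, and frees unmarked large
// objects.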
void MarkCompactCollector::StartSweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_OLD);
      StartSweepSpace(heap()->old_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_CODE);
      StartSweepSpace(heap()->code_space());
    }
    {
      GCTracer::Scope sweep_scope(heap()->tracer(),
                                  GCTracer::Scope::MC_SWEEP_MAP);
      StartSweepSpace(heap()->map_space());
    }
    sweeper().StartSweeping();
  }

  // Deallocate unmarked large objects.
  heap_->lo_space()->FreeUnmarkedObjects();
}

Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }


void MarkCompactCollector::Initialize() {
  MarkCompactMarkingVisitor::Initialize();
  IncrementalMarking::Initialize();
}

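// Records |slot| (a code entry slot in |host| pointing to |target|) in the
// typed OLD_TO_OLD remembered set when |target| lives on an evacuation
// candidate and slot recording is not skipped for |host|.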
void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
                                               Code* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
  if (target_page->IsEvacuationCandidate() &&
      !ShouldSkipEvacuationSlotRecording(host)) {
    // TODO(ulan): remove this check after investigating crbug.com/414964.
    CHECK(target->IsCode());
    RememberedSet<OLD_TO_OLD>::InsertTyped(
        source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
  }
}


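// Called when a code target is patched during a mark-compact GC. If the
// collector is compacting and the host Code object for |pc| is marked black,
// the patched CODE_TARGET slot is recorded via RecordRelocSlot.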
void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
  if (is_compacting()) {
    Code* host =
        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
            pc);
    if (ObjectMarking::IsBlack(host)) {
      RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
      // The target is always in old space, we don't have to record the slot in
      // the old-to-new remembered set.
      DCHECK(!heap()->InNewSpace(target));
      RecordRelocSlot(host, &rinfo, target);
    }
  }
}

}  // namespace internal
}  // namespace v8