1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/mark-compact.h"
6
7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h"
13 #include "src/execution.h"
14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting-inl.h"
23 #include "src/heap/objects-visiting.h"
24 #include "src/heap/page-parallel-job.h"
25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h"
28 #include "src/utils-inl.h"
29 #include "src/v8.h"
30
31 namespace v8 {
32 namespace internal {
33
34
35 const char* Marking::kWhiteBitPattern = "00";
36 const char* Marking::kBlackBitPattern = "11";
37 const char* Marking::kGreyBitPattern = "10";
38 const char* Marking::kImpossibleBitPattern = "01";
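// In the usual tri-color terminology: white objects are unvisited, grey
// objects are marked but their fields have not been scanned yet (they sit on
// the marking deque), and black objects are marked with all fields scanned.
// The "01" pattern can never legitimately occur.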
39
40
41 // The following has to hold in order for {Marking::MarkBitFrom} not to produce
42 // an invalid {kImpossibleBitPattern} in the marking bitmap through overlapping.
43 STATIC_ASSERT(Heap::kMinObjectSizeInWords >= 2);
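// (Each heap word maps to one bit in the marking bitmap and an object's color
// is encoded in the two bits corresponding to its first two words, so objects
// smaller than two words would let neighboring mark-bit pairs overlap.)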
44
45
46 // -------------------------------------------------------------------------
47 // MarkCompactCollector
48
49 MarkCompactCollector::MarkCompactCollector(Heap* heap)
50 : // NOLINT
51 heap_(heap),
52 page_parallel_job_semaphore_(0),
53 #ifdef DEBUG
54 state_(IDLE),
55 #endif
56 marking_parity_(ODD_MARKING_PARITY),
57 was_marked_incrementally_(false),
58 evacuation_(false),
59 compacting_(false),
60 black_allocation_(false),
61 have_code_to_deoptimize_(false),
62 marking_deque_memory_(NULL),
63 marking_deque_memory_committed_(0),
64 code_flusher_(nullptr),
65 embedder_heap_tracer_(nullptr),
66 sweeper_(heap) {
67 }
68
69 #ifdef VERIFY_HEAP
70 class VerifyMarkingVisitor : public ObjectVisitor {
71 public:
72 explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
73
74 void VisitPointers(Object** start, Object** end) override {
75 for (Object** current = start; current < end; current++) {
76 if ((*current)->IsHeapObject()) {
77 HeapObject* object = HeapObject::cast(*current);
78 CHECK(heap_->mark_compact_collector()->IsMarked(object));
79 }
80 }
81 }
82
83 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
84 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
85 if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
86 Object* p = rinfo->target_object();
87 VisitPointer(&p);
88 }
89 }
90
91 void VisitCell(RelocInfo* rinfo) override {
92 Code* code = rinfo->host();
93 DCHECK(rinfo->rmode() == RelocInfo::CELL);
94 if (!code->IsWeakObject(rinfo->target_cell())) {
95 ObjectVisitor::VisitCell(rinfo);
96 }
97 }
98
99 private:
100 Heap* heap_;
101 };
102
103
104 static void VerifyMarking(Heap* heap, Address bottom, Address top) {
105 VerifyMarkingVisitor visitor(heap);
106 HeapObject* object;
107 Address next_object_must_be_here_or_later = bottom;
108
109 for (Address current = bottom; current < top; current += kPointerSize) {
110 object = HeapObject::FromAddress(current);
111 if (MarkCompactCollector::IsMarked(object)) {
112 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
113 CHECK(current >= next_object_must_be_here_or_later);
114 object->Iterate(&visitor);
115 next_object_must_be_here_or_later = current + object->Size();
116 // The next word certainly belongs to the current object; jump over it.
117 current += kPointerSize;
118 }
119 }
120 }
121
122 static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
123 CHECK(page->IsFlagSet(Page::BLACK_PAGE));
124 VerifyMarkingVisitor visitor(heap);
125 HeapObjectIterator it(page);
126 for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
127 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
128 object->Iterate(&visitor);
129 }
130 }
131
132 static void VerifyMarking(NewSpace* space) {
133 Address end = space->top();
134 // The bottom position is at the start of its page. Allows us to use
135 // page->area_start() as start of range on all pages.
136 CHECK_EQ(space->bottom(), Page::FromAddress(space->bottom())->area_start());
137
138 NewSpacePageRange range(space->bottom(), end);
139 for (auto it = range.begin(); it != range.end();) {
140 Page* page = *(it++);
141 Address limit = it != range.end() ? page->area_end() : end;
142 CHECK(limit == end || !page->Contains(end));
143 VerifyMarking(space->heap(), page->area_start(), limit);
144 }
145 }
146
147
148 static void VerifyMarking(PagedSpace* space) {
149 for (Page* p : *space) {
150 if (p->IsFlagSet(Page::BLACK_PAGE)) {
151 VerifyMarkingBlackPage(space->heap(), p);
152 } else {
153 VerifyMarking(space->heap(), p->area_start(), p->area_end());
154 }
155 }
156 }
157
158
159 static void VerifyMarking(Heap* heap) {
160 VerifyMarking(heap->old_space());
161 VerifyMarking(heap->code_space());
162 VerifyMarking(heap->map_space());
163 VerifyMarking(heap->new_space());
164
165 VerifyMarkingVisitor visitor(heap);
166
167 LargeObjectIterator it(heap->lo_space());
168 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
169 if (MarkCompactCollector::IsMarked(obj)) {
170 obj->Iterate(&visitor);
171 }
172 }
173
174 heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
175 }
176
177
178 class VerifyEvacuationVisitor : public ObjectVisitor {
179 public:
180 void VisitPointers(Object** start, Object** end) override {
181 for (Object** current = start; current < end; current++) {
182 if ((*current)->IsHeapObject()) {
183 HeapObject* object = HeapObject::cast(*current);
184 CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
185 }
186 }
187 }
188 };
189
190
191 static void VerifyEvacuation(Page* page) {
192 VerifyEvacuationVisitor visitor;
193 HeapObjectIterator iterator(page);
194 for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
195 heap_object = iterator.Next()) {
196 // We skip free space objects.
197 if (!heap_object->IsFiller()) {
198 heap_object->Iterate(&visitor);
199 }
200 }
201 }
202
203
204 static void VerifyEvacuation(NewSpace* space) {
205 VerifyEvacuationVisitor visitor;
206 NewSpacePageRange range(space->bottom(), space->top());
207 for (auto it = range.begin(); it != range.end();) {
208 Page* page = *(it++);
209 Address current = page->area_start();
210 Address limit = it != range.end() ? page->area_end() : space->top();
211 CHECK(limit == space->top() || !page->Contains(space->top()));
212 while (current < limit) {
213 HeapObject* object = HeapObject::FromAddress(current);
214 object->Iterate(&visitor);
215 current += object->Size();
216 }
217 }
218 }
219
220
221 static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
222 if (FLAG_use_allocation_folding && (space == heap->old_space())) {
223 return;
224 }
225 for (Page* p : *space) {
226 if (p->IsEvacuationCandidate()) continue;
227 VerifyEvacuation(p);
228 }
229 }
230
231
232 static void VerifyEvacuation(Heap* heap) {
233 VerifyEvacuation(heap, heap->old_space());
234 VerifyEvacuation(heap, heap->code_space());
235 VerifyEvacuation(heap, heap->map_space());
236 VerifyEvacuation(heap->new_space());
237
238 VerifyEvacuationVisitor visitor;
239 heap->IterateStrongRoots(&visitor, VISIT_ALL);
240 }
241 #endif // VERIFY_HEAP
242
243
244 void MarkCompactCollector::SetUp() {
245 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
246 DCHECK(strcmp(Marking::kBlackBitPattern, "11") == 0);
247 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0);
248 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
249
250 EnsureMarkingDequeIsReserved();
251 EnsureMarkingDequeIsCommitted(kMinMarkingDequeSize);
252
253 if (FLAG_flush_code) {
254 code_flusher_ = new CodeFlusher(isolate());
255 if (FLAG_trace_code_flushing) {
256 PrintF("[code-flushing is now on]\n");
257 }
258 }
259 }
260
261
262 void MarkCompactCollector::TearDown() {
263 AbortCompaction();
264 delete marking_deque_memory_;
265 delete code_flusher_;
266 }
267
268
269 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
270 DCHECK(!p->NeverEvacuate());
271 p->MarkEvacuationCandidate();
272 evacuation_candidates_.Add(p);
273 }
274
275
276 static void TraceFragmentation(PagedSpace* space) {
277 int number_of_pages = space->CountTotalPages();
278 intptr_t reserved = (number_of_pages * space->AreaSize());
279 intptr_t free = reserved - space->SizeOfObjects();
280 PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
281 AllocationSpaceName(space->identity()), number_of_pages,
282 static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
283 }
284
285
286 bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
287 if (!compacting_) {
288 DCHECK(evacuation_candidates_.length() == 0);
289
290 CollectEvacuationCandidates(heap()->old_space());
291
292 if (FLAG_compact_code_space) {
293 CollectEvacuationCandidates(heap()->code_space());
294 } else if (FLAG_trace_fragmentation) {
295 TraceFragmentation(heap()->code_space());
296 }
297
298 if (FLAG_trace_fragmentation) {
299 TraceFragmentation(heap()->map_space());
300 }
301
302 heap()->old_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
303 heap()->code_space()->EvictEvacuationCandidatesFromLinearAllocationArea();
304
305 compacting_ = evacuation_candidates_.length() > 0;
306 }
307
308 return compacting_;
309 }
310
311 void MarkCompactCollector::ClearInvalidRememberedSetSlots() {
312 {
313 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
314 RememberedSet<OLD_TO_NEW>::ClearInvalidSlots(heap());
315 }
316 // There is no need to filter the old-to-old set because
317 // it is completely cleared after the mark-compact GC.
318 // The slots that become invalid due to runtime transitions are
319 // cleared eagerly immediately after the transition.
320
321 #ifdef VERIFY_HEAP
322 if (FLAG_verify_heap) {
323 RememberedSet<OLD_TO_NEW>::VerifyValidSlots(heap());
324 RememberedSet<OLD_TO_OLD>::VerifyValidSlots(heap());
325 }
326 #endif
327 }
328
329
330 void MarkCompactCollector::CollectGarbage() {
331 // Make sure that Prepare() has been called. The individual steps below will
332 // update the state as they proceed.
333 DCHECK(state_ == PREPARE_GC);
334
335 MarkLiveObjects();
336
337 DCHECK(heap_->incremental_marking()->IsStopped());
338
339 ClearNonLiveReferences();
340
341 #ifdef VERIFY_HEAP
342 if (FLAG_verify_heap) {
343 VerifyMarking(heap_);
344 }
345 #endif
346
347 SweepSpaces();
348
349 EvacuateNewSpaceAndCandidates();
350
351 Finish();
352 }
353
354
355 #ifdef VERIFY_HEAP
356 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
357 for (Page* p : *space) {
358 CHECK(p->markbits()->IsClean());
359 CHECK_EQ(0, p->LiveBytes());
360 }
361 }
362
363
364 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
365 for (Page* p : NewSpacePageRange(space->bottom(), space->top())) {
366 CHECK(p->markbits()->IsClean());
367 CHECK_EQ(0, p->LiveBytes());
368 }
369 }
370
371
372 void MarkCompactCollector::VerifyMarkbitsAreClean() {
373 VerifyMarkbitsAreClean(heap_->old_space());
374 VerifyMarkbitsAreClean(heap_->code_space());
375 VerifyMarkbitsAreClean(heap_->map_space());
376 VerifyMarkbitsAreClean(heap_->new_space());
377
378 LargeObjectIterator it(heap_->lo_space());
379 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
380 MarkBit mark_bit = Marking::MarkBitFrom(obj);
381 CHECK(Marking::IsWhite(mark_bit));
382 CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
383 }
384 }
385
386
387 void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
388 HeapObjectIterator code_iterator(heap()->code_space());
389 for (HeapObject* obj = code_iterator.Next(); obj != NULL;
390 obj = code_iterator.Next()) {
391 Code* code = Code::cast(obj);
392 if (!code->is_optimized_code()) continue;
393 if (WillBeDeoptimized(code)) continue;
394 code->VerifyEmbeddedObjectsDependency();
395 }
396 }
397
398
399 void MarkCompactCollector::VerifyOmittedMapChecks() {
400 HeapObjectIterator iterator(heap()->map_space());
401 for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
402 Map* map = Map::cast(obj);
403 map->VerifyOmittedMapChecks();
404 }
405 }
406 #endif // VERIFY_HEAP
407
408
409 static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
410 for (Page* p : *space) {
411 Bitmap::Clear(p);
412 if (p->IsFlagSet(Page::BLACK_PAGE)) {
413 p->ClearFlag(Page::BLACK_PAGE);
414 }
415 }
416 }
417
418
419 static void ClearMarkbitsInNewSpace(NewSpace* space) {
420 for (Page* page : *space) {
421 Bitmap::Clear(page);
422 }
423 }
424
425
426 void MarkCompactCollector::ClearMarkbits() {
427 ClearMarkbitsInPagedSpace(heap_->code_space());
428 ClearMarkbitsInPagedSpace(heap_->map_space());
429 ClearMarkbitsInPagedSpace(heap_->old_space());
430 ClearMarkbitsInNewSpace(heap_->new_space());
431
432 LargeObjectIterator it(heap_->lo_space());
433 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
434 Marking::MarkWhite(Marking::MarkBitFrom(obj));
435 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
436 chunk->ResetProgressBar();
437 chunk->ResetLiveBytes();
438 if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
439 chunk->ClearFlag(Page::BLACK_PAGE);
440 }
441 }
442 }
443
444 class MarkCompactCollector::Sweeper::SweeperTask : public v8::Task {
445 public:
446 SweeperTask(Sweeper* sweeper, base::Semaphore* pending_sweeper_tasks,
447 AllocationSpace space_to_start)
448 : sweeper_(sweeper),
449 pending_sweeper_tasks_(pending_sweeper_tasks),
450 space_to_start_(space_to_start) {}
451
452 virtual ~SweeperTask() {}
453
454 private:
455 // v8::Task overrides.
456 void Run() override {
457 DCHECK_GE(space_to_start_, FIRST_SPACE);
458 DCHECK_LE(space_to_start_, LAST_PAGED_SPACE);
459 const int offset = space_to_start_ - FIRST_SPACE;
460 const int num_spaces = LAST_PAGED_SPACE - FIRST_SPACE + 1;
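// Visit all paged spaces in round-robin order, beginning with
// space_to_start_, so concurrently running tasks start sweeping different
// spaces.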
461 for (int i = 0; i < num_spaces; i++) {
462 const int space_id = FIRST_SPACE + ((i + offset) % num_spaces);
463 DCHECK_GE(space_id, FIRST_SPACE);
464 DCHECK_LE(space_id, LAST_PAGED_SPACE);
465 sweeper_->ParallelSweepSpace(static_cast<AllocationSpace>(space_id), 0);
466 }
467 pending_sweeper_tasks_->Signal();
468 }
469
470 Sweeper* sweeper_;
471 base::Semaphore* pending_sweeper_tasks_;
472 AllocationSpace space_to_start_;
473
474 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
475 };
476
477 void MarkCompactCollector::Sweeper::StartSweeping() {
478 sweeping_in_progress_ = true;
479 ForAllSweepingSpaces([this](AllocationSpace space) {
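// Order each space's sweeping list by ascending live bytes before any
// sweeping starts (the comparator below keeps the emptiest pages first).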
480 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(),
481 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); });
482 });
483 if (FLAG_concurrent_sweeping) {
484 ForAllSweepingSpaces([this](AllocationSpace space) {
485 if (space == NEW_SPACE) return;
486 StartSweepingHelper(space);
487 });
488 }
489 }
490
491 void MarkCompactCollector::Sweeper::StartSweepingHelper(
492 AllocationSpace space_to_start) {
493 num_sweeping_tasks_.Increment(1);
494 V8::GetCurrentPlatform()->CallOnBackgroundThread(
495 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start),
496 v8::Platform::kShortRunningTask);
497 }
498
499 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted(
500 Page* page) {
501 if (!page->SweepingDone()) {
502 ParallelSweepPage(page, page->owner()->identity());
503 if (!page->SweepingDone()) {
504 // We were not able to sweep that page, i.e., a concurrent
505 // sweeper thread currently owns this page. Wait for the sweeper
506 // thread to be done with this page.
507 page->WaitUntilSweepingCompleted();
508 }
509 }
510 }
511
512 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
513 if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) {
514 sweeper().ParallelSweepSpace(space->identity(), 0);
515 space->RefillFreeList();
516 }
517 }
518
519 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) {
520 base::LockGuard<base::Mutex> guard(&mutex_);
521 SweptList& list = swept_list_[space->identity()];
522 if (list.length() > 0) {
523 return list.RemoveLast();
524 }
525 return nullptr;
526 }
527
528 void MarkCompactCollector::Sweeper::EnsureCompleted() {
529 if (!sweeping_in_progress_) return;
530
531 // If sweeping is not completed or not running at all, we try to complete it
532 // here.
533 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
534 ForAllSweepingSpaces(
535 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
536 }
537
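// Each background sweeper task signals the semaphore exactly once when it
// finishes, so waiting until the task counter drops to zero guarantees that
// no background sweeping is still running.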
538 if (FLAG_concurrent_sweeping) {
539 while (num_sweeping_tasks_.Value() > 0) {
540 pending_sweeper_tasks_semaphore_.Wait();
541 num_sweeping_tasks_.Increment(-1);
542 }
543 }
544
545 ForAllSweepingSpaces([this](AllocationSpace space) {
546 if (space == NEW_SPACE) {
547 swept_list_[NEW_SPACE].Clear();
548 }
549 DCHECK(sweeping_list_[space].empty());
550 });
551 late_pages_ = false;
552 sweeping_in_progress_ = false;
553 }
554
555 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() {
556 if (!sweeping_in_progress_) return;
557 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
558 for (Page* p : *heap_->new_space()) {
559 SweepOrWaitUntilSweepingCompleted(p);
560 }
561 }
562 }
563
564 void MarkCompactCollector::EnsureSweepingCompleted() {
565 if (!sweeper().sweeping_in_progress()) return;
566
567 sweeper().EnsureCompleted();
568 heap()->old_space()->RefillFreeList();
569 heap()->code_space()->RefillFreeList();
570 heap()->map_space()->RefillFreeList();
571
572 #ifdef VERIFY_HEAP
573 if (FLAG_verify_heap && !evacuation()) {
574 VerifyEvacuation(heap_);
575 }
576 #endif
577 }
578
579 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() {
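// Drain, without blocking, one semaphore signal per background task that has
// already finished; sweeping is complete once every started task has been
// accounted for.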
580 while (pending_sweeper_tasks_semaphore_.WaitFor(
581 base::TimeDelta::FromSeconds(0))) {
582 num_sweeping_tasks_.Increment(-1);
583 }
584 return num_sweeping_tasks_.Value() == 0;
585 }
586
587 void Marking::TransferMark(Heap* heap, Address old_start, Address new_start) {
588 // This is only used when resizing an object.
589 DCHECK(MemoryChunk::FromAddress(old_start) ==
590 MemoryChunk::FromAddress(new_start));
591
592 if (!heap->incremental_marking()->IsMarking() ||
593 Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
594 return;
595
596 // If the mark doesn't move, we don't check the color of the object.
597 // It doesn't matter whether the object is black, since it hasn't changed
598 // size, so the adjustment to the live data count will be zero anyway.
599 if (old_start == new_start) return;
600
601 MarkBit new_mark_bit = MarkBitFrom(new_start);
602 MarkBit old_mark_bit = MarkBitFrom(old_start);
603
604 #ifdef DEBUG
605 ObjectColor old_color = Color(old_mark_bit);
606 #endif
607
608 if (Marking::IsBlack(old_mark_bit)) {
609 Marking::BlackToWhite(old_mark_bit);
610 Marking::MarkBlack(new_mark_bit);
611 return;
612 } else if (Marking::IsGrey(old_mark_bit)) {
613 Marking::GreyToWhite(old_mark_bit);
614 heap->incremental_marking()->WhiteToGreyAndPush(
615 HeapObject::FromAddress(new_start), new_mark_bit);
616 heap->incremental_marking()->RestartIfNotMarking();
617 }
618
619 #ifdef DEBUG
620 ObjectColor new_color = Color(new_mark_bit);
621 DCHECK(new_color == old_color);
622 #endif
623 }
624
625
626 const char* AllocationSpaceName(AllocationSpace space) {
627 switch (space) {
628 case NEW_SPACE:
629 return "NEW_SPACE";
630 case OLD_SPACE:
631 return "OLD_SPACE";
632 case CODE_SPACE:
633 return "CODE_SPACE";
634 case MAP_SPACE:
635 return "MAP_SPACE";
636 case LO_SPACE:
637 return "LO_SPACE";
638 default:
639 UNREACHABLE();
640 }
641
642 return NULL;
643 }
644
645
646 void MarkCompactCollector::ComputeEvacuationHeuristics(
647 int area_size, int* target_fragmentation_percent,
648 int* max_evacuated_bytes) {
649 // For memory reducing mode we directly define both constants.
650 const int kTargetFragmentationPercentForReduceMemory = 20;
651 const int kMaxEvacuatedBytesForReduceMemory = 12 * Page::kPageSize;
652
653 // For regular mode (which is latency critical) we define less aggressive
654 // defaults to start and switch to a trace-based (using compaction speed)
655 // approach as soon as we have enough samples.
656 const int kTargetFragmentationPercent = 70;
657 const int kMaxEvacuatedBytes = 4 * Page::kPageSize;
658 // Time to take for a single area (=payload of page). Used as soon as there
659 // exist enough compaction speed samples.
660 const int kTargetMsPerArea = 1;
661
662 if (heap()->ShouldReduceMemory()) {
663 *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
664 *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
665 } else {
666 const double estimated_compaction_speed =
667 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
668 if (estimated_compaction_speed != 0) {
669 // Estimate the target fragmentation based on traced compaction speed
670 // and a goal for a single page.
671 const double estimated_ms_per_area =
672 1 + area_size / estimated_compaction_speed;
673 *target_fragmentation_percent = static_cast<int>(
674 100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
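// For example, if compacting an area takes about twice the target time
// (estimated_ms_per_area == 2), a page must be roughly 50% free to qualify;
// faster compaction lowers the threshold (bounded below by the reduce-memory
// limit applied next), slower compaction raises it towards 100%.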
675 if (*target_fragmentation_percent <
676 kTargetFragmentationPercentForReduceMemory) {
677 *target_fragmentation_percent =
678 kTargetFragmentationPercentForReduceMemory;
679 }
680 } else {
681 *target_fragmentation_percent = kTargetFragmentationPercent;
682 }
683 *max_evacuated_bytes = kMaxEvacuatedBytes;
684 }
685 }
686
687
688 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
689 DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
690
691 int number_of_pages = space->CountTotalPages();
692 int area_size = space->AreaSize();
693
694 // Pairs of (live_bytes_in_page, page).
695 typedef std::pair<int, Page*> LiveBytesPagePair;
696 std::vector<LiveBytesPagePair> pages;
697 pages.reserve(number_of_pages);
698
699 for (Page* p : *space) {
700 if (p->NeverEvacuate()) continue;
701 if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
702 // Invariant: Evacuation candidates are only created when marking is
703 // started. This means that sweeping has finished. Furthermore, at the end
704 // of a GC all evacuation candidates are cleared and their slot buffers are
705 // released.
706 CHECK(!p->IsEvacuationCandidate());
707 CHECK_NULL(p->old_to_old_slots());
708 CHECK_NULL(p->typed_old_to_old_slots());
709 CHECK(p->SweepingDone());
710 DCHECK(p->area_size() == area_size);
711 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
712 }
713
714 int candidate_count = 0;
715 int total_live_bytes = 0;
716
717 const bool reduce_memory = heap()->ShouldReduceMemory();
718 if (FLAG_manual_evacuation_candidates_selection) {
719 for (size_t i = 0; i < pages.size(); i++) {
720 Page* p = pages[i].second;
721 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
722 candidate_count++;
723 total_live_bytes += pages[i].first;
724 p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
725 AddEvacuationCandidate(p);
726 }
727 }
728 } else if (FLAG_stress_compaction) {
729 for (size_t i = 0; i < pages.size(); i++) {
730 Page* p = pages[i].second;
731 if (i % 2 == 0) {
732 candidate_count++;
733 total_live_bytes += pages[i].first;
734 AddEvacuationCandidate(p);
735 }
736 }
737 } else {
738 // The following approach determines the pages that should be evacuated.
739 //
740 // We use two conditions to decide whether a page qualifies as an evacuation
741 // candidate, or not:
742 // * Target fragmentation: How fragmented a page is, i.e., the ratio between
743 // live bytes and the capacity (= area) of the page.
744 // * Evacuation quota: A global quota determining how many bytes should be
745 // compacted.
746 //
747 // The algorithm sorts all pages by live bytes and then iterates through
748 // them starting with the page with the most free memory, adding them to the
749 // set of evacuation candidates as long as both conditions (fragmentation
750 // and quota) hold.
751 int max_evacuated_bytes;
752 int target_fragmentation_percent;
753 ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
754 &max_evacuated_bytes);
755
756 const intptr_t free_bytes_threshold =
757 target_fragmentation_percent * (area_size / 100);
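// E.g., with a target fragmentation of 70%, a page qualifies only if at least
// 70% of its area is free.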
758
759 // Sort pages from the most free to the least free, then select
760 // the first n pages for evacuation such that:
761 // - the total size of evacuated objects does not exceed the specified
762 // limit.
763 // - fragmentation of (n+1)-th page does not exceed the specified limit.
764 std::sort(pages.begin(), pages.end(),
765 [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
766 return a.first < b.first;
767 });
768 for (size_t i = 0; i < pages.size(); i++) {
769 int live_bytes = pages[i].first;
770 int free_bytes = area_size - live_bytes;
771 if (FLAG_always_compact ||
772 ((free_bytes >= free_bytes_threshold) &&
773 ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
774 candidate_count++;
775 total_live_bytes += live_bytes;
776 }
777 if (FLAG_trace_fragmentation_verbose) {
778 PrintIsolate(isolate(),
779 "compaction-selection-page: space=%s free_bytes_page=%d "
780 "fragmentation_limit_kb=%" V8PRIdPTR
781 " fragmentation_limit_percent=%d sum_compaction_kb=%d "
782 "compaction_limit_kb=%d\n",
783 AllocationSpaceName(space->identity()), free_bytes / KB,
784 free_bytes_threshold / KB, target_fragmentation_percent,
785 total_live_bytes / KB, max_evacuated_bytes / KB);
786 }
787 }
788 // How many pages we will allocate for the evacuated objects
789 // in the worst case: ceil(total_live_bytes / area_size)
790 int estimated_new_pages = (total_live_bytes + area_size - 1) / area_size;
791 DCHECK_LE(estimated_new_pages, candidate_count);
792 int estimated_released_pages = candidate_count - estimated_new_pages;
793 // Avoid (compact -> expand) cycles.
794 if ((estimated_released_pages == 0) && !FLAG_always_compact) {
795 candidate_count = 0;
796 }
797 for (int i = 0; i < candidate_count; i++) {
798 AddEvacuationCandidate(pages[i].second);
799 }
800 }
801
802 if (FLAG_trace_fragmentation) {
803 PrintIsolate(isolate(),
804 "compaction-selection: space=%s reduce_memory=%d pages=%d "
805 "total_live_bytes=%d\n",
806 AllocationSpaceName(space->identity()), reduce_memory,
807 candidate_count, total_live_bytes / KB);
808 }
809 }
810
811
812 void MarkCompactCollector::AbortCompaction() {
813 if (compacting_) {
814 RememberedSet<OLD_TO_OLD>::ClearAll(heap());
815 for (Page* p : evacuation_candidates_) {
816 p->ClearEvacuationCandidate();
817 }
818 compacting_ = false;
819 evacuation_candidates_.Rewind(0);
820 }
821 DCHECK_EQ(0, evacuation_candidates_.length());
822 }
823
824
825 void MarkCompactCollector::Prepare() {
826 was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
827
828 #ifdef DEBUG
829 DCHECK(state_ == IDLE);
830 state_ = PREPARE_GC;
831 #endif
832
833 DCHECK(!FLAG_never_compact || !FLAG_always_compact);
834
835 if (sweeping_in_progress()) {
836 // Instead of waiting we could also abort the sweeper threads here.
837 EnsureSweepingCompleted();
838 }
839
840 // If concurrent unmapping tasks are still running, we should wait for
841 // them here.
842 heap()->memory_allocator()->unmapper()->WaitUntilCompleted();
843
844 // Clear marking bits if incremental marking is aborted.
845 if (was_marked_incrementally_ && heap_->ShouldAbortIncrementalMarking()) {
846 heap()->incremental_marking()->Stop();
847 ClearMarkbits();
848 AbortWeakCollections();
849 AbortWeakCells();
850 AbortTransitionArrays();
851 AbortCompaction();
852 if (heap_->UsingEmbedderHeapTracer()) {
853 heap_->mark_compact_collector()->embedder_heap_tracer()->AbortTracing();
854 }
855 was_marked_incrementally_ = false;
856 }
857
858 if (!was_marked_incrementally_) {
859 if (heap_->UsingEmbedderHeapTracer()) {
860 heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
861 }
862 }
863
864 if (UsingEmbedderHeapTracer()) {
865 embedder_heap_tracer()->EnterFinalPause();
866 }
867
868 // Don't start compaction if we are in the middle of an incremental
869 // marking cycle. We did not collect any slots.
870 if (!FLAG_never_compact && !was_marked_incrementally_) {
871 StartCompaction(NON_INCREMENTAL_COMPACTION);
872 }
873
874 PagedSpaces spaces(heap());
875 for (PagedSpace* space = spaces.next(); space != NULL;
876 space = spaces.next()) {
877 space->PrepareForMarkCompact();
878 }
879 heap()->account_external_memory_concurrently_freed();
880
881 #ifdef VERIFY_HEAP
882 if (!was_marked_incrementally_ && FLAG_verify_heap) {
883 VerifyMarkbitsAreClean();
884 }
885 #endif
886 }
887
888
889 void MarkCompactCollector::Finish() {
890 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
891
892 if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) {
893 // If we added some more pages during MC, we need to start at least one
894 // more task as all other tasks might already be finished.
895 sweeper().StartSweepingHelper(OLD_SPACE);
896 }
897
898 // The hashing of weak_object_to_code_table is no longer valid.
899 heap()->weak_object_to_code_table()->Rehash(
900 heap()->isolate()->factory()->undefined_value());
901
902 // Clear the marking state of live large objects.
903 heap_->lo_space()->ClearMarkingStateOfLiveObjects();
904
905 #ifdef DEBUG
906 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
907 state_ = IDLE;
908 #endif
909 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
910
911 // The stub cache is not traversed during GC; clear the cache to
912 // force lazy re-initialization of it. This must be done after the
913 // GC, because it relies on the new address of certain old space
914 // objects (empty string, illegal builtin).
915 isolate()->stub_cache()->Clear();
916
917 if (have_code_to_deoptimize_) {
918 // Some code objects were marked for deoptimization during the GC.
919 Deoptimizer::DeoptimizeMarkedCode(isolate());
920 have_code_to_deoptimize_ = false;
921 }
922
923 heap_->incremental_marking()->ClearIdleMarkingDelayCounter();
924
925 if (marking_parity_ == EVEN_MARKING_PARITY) {
926 marking_parity_ = ODD_MARKING_PARITY;
927 } else {
928 DCHECK(marking_parity_ == ODD_MARKING_PARITY);
929 marking_parity_ = EVEN_MARKING_PARITY;
930 }
931 }
932
933
934 // -------------------------------------------------------------------------
935 // Phase 1: tracing and marking live objects.
936 // before: all objects are in their normal state.
937 // after: live objects are marked black ('11') in the marking bitmap.
938
939 // Marking all live objects in the heap as part of mark-sweep or mark-compact
940 // collection. Before marking, all objects are in their normal state. After
941 // marking, live objects' map pointers are marked indicating that the object
942 // has been found reachable.
943 //
944 // The marking algorithm is a (mostly) depth-first (because of possible stack
945 // overflow) traversal of the graph of objects reachable from the roots. It
946 // uses an explicit stack of pointers rather than recursion. The young
947 // generation's inactive ('from') space is used as a marking stack. The
948 // objects in the marking stack are the ones that have been reached and marked
949 // but their children have not yet been visited.
950 //
951 // The marking stack can overflow during traversal. In that case, we set an
952 // overflow flag. When the overflow flag is set, we continue marking objects
953 // reachable from the objects on the marking stack, but no longer push them on
954 // the marking stack. Instead, we mark them as both marked and overflowed.
955 // When the stack is in the overflowed state, objects marked as overflowed
956 // have been reached and marked but their children have not been visited yet.
957 // After emptying the marking stack, we clear the overflow flag and traverse
958 // the heap looking for objects marked as overflowed, push them on the stack,
959 // and continue with marking. This process repeats until all reachable
960 // objects have been marked.
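//
// A simplified sketch of that loop (illustrative names only, not the actual
// helpers used below):
//
//   do {
//     while (!marking_deque.IsEmpty()) {
//       HeapObject* obj = marking_deque.Pop();
//       VisitBodyAndPushUnmarkedChildren(obj);  // may set the overflow flag
//     }
//     if (marking_deque.overflowed()) {
//       marking_deque.ClearOverflowed();
//       RefillDequeWithOverflowedObjects();  // heap scan for overflowed objects
//     }
//   } while (!marking_deque.IsEmpty() || marking_deque.overflowed());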
961
962 void CodeFlusher::ProcessJSFunctionCandidates() {
963 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
964 Object* undefined = isolate_->heap()->undefined_value();
965
966 JSFunction* candidate = jsfunction_candidates_head_;
967 JSFunction* next_candidate;
968 while (candidate != NULL) {
969 next_candidate = GetNextCandidate(candidate);
970 ClearNextCandidate(candidate, undefined);
971
972 SharedFunctionInfo* shared = candidate->shared();
973
974 Code* code = shared->code();
975 MarkBit code_mark = Marking::MarkBitFrom(code);
976 if (Marking::IsWhite(code_mark)) {
977 if (FLAG_trace_code_flushing && shared->is_compiled()) {
978 PrintF("[code-flushing clears: ");
979 shared->ShortPrint();
980 PrintF(" - age: %d]\n", code->GetAge());
981 }
982 // Always flush the optimized code map if there is one.
983 if (!shared->OptimizedCodeMapIsCleared()) {
984 shared->ClearOptimizedCodeMap();
985 }
986 shared->set_code(lazy_compile);
987 candidate->set_code(lazy_compile);
988 } else {
989 DCHECK(Marking::IsBlack(code_mark));
990 candidate->set_code(code);
991 }
992
993 // We are in the middle of a GC cycle so the write barrier in the code
994 // setter did not record the slot update and we have to do that manually.
995 Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
996 Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
997 isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(
998 candidate, slot, target);
999
1000 Object** shared_code_slot =
1001 HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
1002 isolate_->heap()->mark_compact_collector()->RecordSlot(
1003 shared, shared_code_slot, *shared_code_slot);
1004
1005 candidate = next_candidate;
1006 }
1007
1008 jsfunction_candidates_head_ = NULL;
1009 }
1010
1011
1012 void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
1013 Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
1014
1015 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1016 SharedFunctionInfo* next_candidate;
1017 while (candidate != NULL) {
1018 next_candidate = GetNextCandidate(candidate);
1019 ClearNextCandidate(candidate);
1020
1021 Code* code = candidate->code();
1022 MarkBit code_mark = Marking::MarkBitFrom(code);
1023 if (Marking::IsWhite(code_mark)) {
1024 if (FLAG_trace_code_flushing && candidate->is_compiled()) {
1025 PrintF("[code-flushing clears: ");
1026 candidate->ShortPrint();
1027 PrintF(" - age: %d]\n", code->GetAge());
1028 }
1029 // Always flush the optimized code map if there is one.
1030 if (!candidate->OptimizedCodeMapIsCleared()) {
1031 candidate->ClearOptimizedCodeMap();
1032 }
1033 candidate->set_code(lazy_compile);
1034 }
1035
1036 Object** code_slot =
1037 HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
1038 isolate_->heap()->mark_compact_collector()->RecordSlot(candidate, code_slot,
1039 *code_slot);
1040
1041 candidate = next_candidate;
1042 }
1043
1044 shared_function_info_candidates_head_ = NULL;
1045 }
1046
1047
1048 void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
1049 // Make sure previous flushing decisions are revisited.
1050 isolate_->heap()->incremental_marking()->IterateBlackObject(shared_info);
1051
1052 if (FLAG_trace_code_flushing) {
1053 PrintF("[code-flushing abandons function-info: ");
1054 shared_info->ShortPrint();
1055 PrintF("]\n");
1056 }
1057
1058 SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
1059 SharedFunctionInfo* next_candidate;
1060 if (candidate == shared_info) {
1061 next_candidate = GetNextCandidate(shared_info);
1062 shared_function_info_candidates_head_ = next_candidate;
1063 ClearNextCandidate(shared_info);
1064 } else {
1065 while (candidate != NULL) {
1066 next_candidate = GetNextCandidate(candidate);
1067
1068 if (next_candidate == shared_info) {
1069 next_candidate = GetNextCandidate(shared_info);
1070 SetNextCandidate(candidate, next_candidate);
1071 ClearNextCandidate(shared_info);
1072 break;
1073 }
1074
1075 candidate = next_candidate;
1076 }
1077 }
1078 }
1079
1080
1081 void CodeFlusher::EvictCandidate(JSFunction* function) {
1082 DCHECK(!function->next_function_link()->IsUndefined(isolate_));
1083 Object* undefined = isolate_->heap()->undefined_value();
1084
1085 // Make sure previous flushing decisions are revisited.
1086 isolate_->heap()->incremental_marking()->IterateBlackObject(function);
1087 isolate_->heap()->incremental_marking()->IterateBlackObject(
1088 function->shared());
1089
1090 if (FLAG_trace_code_flushing) {
1091 PrintF("[code-flushing abandons closure: ");
1092 function->shared()->ShortPrint();
1093 PrintF("]\n");
1094 }
1095
1096 JSFunction* candidate = jsfunction_candidates_head_;
1097 JSFunction* next_candidate;
1098 if (candidate == function) {
1099 next_candidate = GetNextCandidate(function);
1100 jsfunction_candidates_head_ = next_candidate;
1101 ClearNextCandidate(function, undefined);
1102 } else {
1103 while (candidate != NULL) {
1104 next_candidate = GetNextCandidate(candidate);
1105
1106 if (next_candidate == function) {
1107 next_candidate = GetNextCandidate(function);
1108 SetNextCandidate(candidate, next_candidate);
1109 ClearNextCandidate(function, undefined);
1110 break;
1111 }
1112
1113 candidate = next_candidate;
1114 }
1115 }
1116 }
1117
1118
1119 void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
1120 Heap* heap = isolate_->heap();
1121
1122 JSFunction** slot = &jsfunction_candidates_head_;
1123 JSFunction* candidate = jsfunction_candidates_head_;
1124 while (candidate != NULL) {
1125 if (heap->InFromSpace(candidate)) {
1126 v->VisitPointer(reinterpret_cast<Object**>(slot));
1127 }
1128 candidate = GetNextCandidate(*slot);
1129 slot = GetNextCandidateSlot(*slot);
1130 }
1131 }
1132
1133
1134 class MarkCompactMarkingVisitor
1135 : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
1136 public:
1137 static void Initialize();
1138
1139 INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
1140 MarkObjectByPointer(heap->mark_compact_collector(), object, p);
1141 }
1142
1143 INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
1144 Object** start, Object** end)) {
1145 // Mark all objects pointed to in [start, end).
1146 const int kMinRangeForMarkingRecursion = 64;
1147 if (end - start >= kMinRangeForMarkingRecursion) {
1148 if (VisitUnmarkedObjects(heap, object, start, end)) return;
1149 // We are close to a stack overflow, so just mark the objects.
1150 }
1151 MarkCompactCollector* collector = heap->mark_compact_collector();
1152 for (Object** p = start; p < end; p++) {
1153 MarkObjectByPointer(collector, object, p);
1154 }
1155 }
1156
1157 // Marks the object black and pushes it on the marking stack.
1158 INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
1159 MarkBit mark = Marking::MarkBitFrom(object);
1160 heap->mark_compact_collector()->MarkObject(object, mark);
1161 }
1162
1163 // Marks the object black without pushing it on the marking stack.
1164 // Returns true if object needed marking and false otherwise.
1165 INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
1166 MarkBit mark_bit = Marking::MarkBitFrom(object);
1167 if (Marking::IsWhite(mark_bit)) {
1168 heap->mark_compact_collector()->SetMark(object, mark_bit);
1169 return true;
1170 }
1171 return false;
1172 }
1173
1174 // Mark object pointed to by p.
1175 INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
1176 HeapObject* object, Object** p)) {
1177 if (!(*p)->IsHeapObject()) return;
1178 HeapObject* target_object = HeapObject::cast(*p);
1179 collector->RecordSlot(object, p, target_object);
1180 MarkBit mark = Marking::MarkBitFrom(target_object);
1181 collector->MarkObject(target_object, mark);
1182 }
1183
1184
1185 // Visit an unmarked object.
1186 INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
1187 HeapObject* obj)) {
1188 #ifdef DEBUG
1189 DCHECK(collector->heap()->Contains(obj));
1190 DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
1191 #endif
1192 Map* map = obj->map();
1193 Heap* heap = obj->GetHeap();
1194 MarkBit mark = Marking::MarkBitFrom(obj);
1195 heap->mark_compact_collector()->SetMark(obj, mark);
1196 // Mark the map pointer and the body.
1197 MarkBit map_mark = Marking::MarkBitFrom(map);
1198 heap->mark_compact_collector()->MarkObject(map, map_mark);
1199 IterateBody(map, obj);
1200 }
1201
1202 // Visit all unmarked objects pointed to by [start, end).
1203 // Returns false if the operation fails (lack of stack space).
1204 INLINE(static bool VisitUnmarkedObjects(Heap* heap, HeapObject* object,
1205 Object** start, Object** end)) {
1206 // Return false if we are close to the stack limit.
1207 StackLimitCheck check(heap->isolate());
1208 if (check.HasOverflowed()) return false;
1209
1210 MarkCompactCollector* collector = heap->mark_compact_collector();
1211 // Visit the unmarked objects.
1212 for (Object** p = start; p < end; p++) {
1213 Object* o = *p;
1214 if (!o->IsHeapObject()) continue;
1215 collector->RecordSlot(object, p, o);
1216 HeapObject* obj = HeapObject::cast(o);
1217 MarkBit mark = Marking::MarkBitFrom(obj);
1218 if (Marking::IsBlackOrGrey(mark)) continue;
1219 VisitUnmarkedObject(collector, obj);
1220 }
1221 return true;
1222 }
1223
1224 private:
1225 // Code flushing support.
1226
1227 static const int kRegExpCodeThreshold = 5;
1228
1229 static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
1230 bool is_one_byte) {
1231 // Make sure that the fixed array is in fact initialized on the RegExp.
1232 // We could potentially trigger a GC when initializing the RegExp.
1233 if (HeapObject::cast(re->data())->map()->instance_type() !=
1234 FIXED_ARRAY_TYPE)
1235 return;
1236
1237 // Make sure this is a RegExp that actually contains code.
1238 if (re->TypeTag() != JSRegExp::IRREGEXP) return;
1239
1240 Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
1241 if (!code->IsSmi() &&
1242 HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
1243 // Save a copy that can be reinstated if we need the code again.
1244 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
1245
1246 // Saving a copy might create a pointer into a compaction candidate
1247 // that was not observed by the marker. This might happen if the JSRegExp
1248 // data was marked through the compilation cache before the marker reached
1249 // the JSRegExp object.
1250 FixedArray* data = FixedArray::cast(re->data());
1251 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(data))) {
1252 Object** slot =
1253 data->data_start() + JSRegExp::saved_code_index(is_one_byte);
1254 heap->mark_compact_collector()->RecordSlot(data, slot, code);
1255 }
1256
1257 // Set a number in the 0-255 range to guarantee no smi overflow.
1258 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1259 Smi::FromInt(heap->ms_count() & 0xff));
1260 } else if (code->IsSmi()) {
1261 int value = Smi::cast(code)->value();
1262 // The regexp has not been compiled yet or there was a compilation error.
1263 if (value == JSRegExp::kUninitializedValue ||
1264 value == JSRegExp::kCompilationErrorValue) {
1265 return;
1266 }
1267
1268 // Check if we should flush now.
1269 if (value == ((heap->ms_count() - kRegExpCodeThreshold) & 0xff)) {
1270 re->SetDataAt(JSRegExp::code_index(is_one_byte),
1271 Smi::FromInt(JSRegExp::kUninitializedValue));
1272 re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
1273 Smi::FromInt(JSRegExp::kUninitializedValue));
1274 }
1275 }
1276 }
1277
1278
1279 // Works by setting the current sweep_generation (as a smi) in the
1280 // code object place in the data array of the RegExp and keeps a copy
1281 // around that can be reinstated if we reuse the RegExp before flushing.
1282 // If we did not use the code for kRegExpCodeThreshold mark sweep GCs
1283 // we flush the code.
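// For example, if a regexp's code was last seen in use at mark-sweep N, its
// data slot holds N & 0xff, and the code is flushed during the collection
// whose ms_count equals N + kRegExpCodeThreshold (modulo 256).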
1284 static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
1285 Heap* heap = map->GetHeap();
1286 MarkCompactCollector* collector = heap->mark_compact_collector();
1287 if (!collector->is_code_flushing_enabled()) {
1288 VisitJSRegExp(map, object);
1289 return;
1290 }
1291 JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
1292 // Flush code or set age on both one byte and two byte code.
1293 UpdateRegExpCodeAgeAndFlush(heap, re, true);
1294 UpdateRegExpCodeAgeAndFlush(heap, re, false);
1295 // Visit the fields of the RegExp, including the updated FixedArray.
1296 VisitJSRegExp(map, object);
1297 }
1298 };
1299
1300
1301 void MarkCompactMarkingVisitor::Initialize() {
1302 StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
1303
1304 table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
1305
1306 if (FLAG_track_gc_object_stats) {
1307 MarkCompactObjectStatsVisitor::Initialize(&table_);
1308 }
1309 }
1310
1311
1312 class CodeMarkingVisitor : public ThreadVisitor {
1313 public:
1314 explicit CodeMarkingVisitor(MarkCompactCollector* collector)
1315 : collector_(collector) {}
1316
1317 void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
1318 collector_->PrepareThreadForCodeFlushing(isolate, top);
1319 }
1320
1321 private:
1322 MarkCompactCollector* collector_;
1323 };
1324
1325
1326 class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
1327 public:
1328 explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
1329 : collector_(collector) {}
1330
1331 void VisitPointers(Object** start, Object** end) override {
1332 for (Object** p = start; p < end; p++) VisitPointer(p);
1333 }
1334
1335 void VisitPointer(Object** slot) override {
1336 Object* obj = *slot;
1337 if (obj->IsSharedFunctionInfo()) {
1338 SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
1339 MarkBit shared_mark = Marking::MarkBitFrom(shared);
1340 MarkBit code_mark = Marking::MarkBitFrom(shared->code());
1341 collector_->MarkObject(shared->code(), code_mark);
1342 collector_->MarkObject(shared, shared_mark);
1343 }
1344 }
1345
1346 private:
1347 MarkCompactCollector* collector_;
1348 };
1349
1350
1351 void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
1352 ThreadLocalTop* top) {
1353 for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
1354 // Note: for a frame that has a pending lazy deoptimization,
1355 // StackFrame::unchecked_code will return a non-optimized code object for
1356 // the outermost function, and StackFrame::LookupCode will return the
1357 // actual optimized code object.
1358 StackFrame* frame = it.frame();
1359 Code* code = frame->unchecked_code();
1360 MarkBit code_mark = Marking::MarkBitFrom(code);
1361 MarkObject(code, code_mark);
1362 if (frame->is_optimized()) {
1363 Code* optimized_code = frame->LookupCode();
1364 MarkBit optimized_code_mark = Marking::MarkBitFrom(optimized_code);
1365 MarkObject(optimized_code, optimized_code_mark);
1366 }
1367 }
1368 }
1369
1370
1371 void MarkCompactCollector::PrepareForCodeFlushing() {
1372 // If code flushing is disabled, there is no need to prepare for it.
1373 if (!is_code_flushing_enabled()) return;
1374
1375 // Make sure we are not referencing the code from the stack.
1376 DCHECK(this == heap()->mark_compact_collector());
1377 PrepareThreadForCodeFlushing(heap()->isolate(),
1378 heap()->isolate()->thread_local_top());
1379
1380 // Iterate the archived stacks in all threads to check if
1381 // the code is referenced.
1382 CodeMarkingVisitor code_marking_visitor(this);
1383 heap()->isolate()->thread_manager()->IterateArchivedThreads(
1384 &code_marking_visitor);
1385
1386 SharedFunctionInfoMarkingVisitor visitor(this);
1387 heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
1388 heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
1389
1390 ProcessMarkingDeque();
1391 }
1392
1393
1394 // Visitor class for marking heap roots.
1395 class RootMarkingVisitor : public ObjectVisitor {
1396 public:
1397 explicit RootMarkingVisitor(Heap* heap)
1398 : collector_(heap->mark_compact_collector()) {}
1399
1400 void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
1401
1402 void VisitPointers(Object** start, Object** end) override {
1403 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
1404 }
1405
1406 // Skip the weak next code link in a code object, which is visited in
1407 // ProcessTopOptimizedFrame.
1408 void VisitNextCodeLink(Object** p) override {}
1409
1410 private:
1411 void MarkObjectByPointer(Object** p) {
1412 if (!(*p)->IsHeapObject()) return;
1413
1414 HeapObject* object = HeapObject::cast(*p);
1415
1416 if (collector_->heap()->PurgeLeftTrimmedObject(p)) return;
1417
1418 MarkBit mark_bit = Marking::MarkBitFrom(object);
1419 if (Marking::IsBlackOrGrey(mark_bit)) return;
1420
1421 Map* map = object->map();
1422 // Mark the object.
1423 collector_->SetMark(object, mark_bit);
1424
1425 // Mark the map pointer and body, and push them on the marking stack.
1426 MarkBit map_mark = Marking::MarkBitFrom(map);
1427 collector_->MarkObject(map, map_mark);
1428 MarkCompactMarkingVisitor::IterateBody(map, object);
1429
1430 // Mark all the objects reachable from the map and body. May leave
1431 // overflowed objects in the heap.
1432 collector_->EmptyMarkingDeque();
1433 }
1434
1435 MarkCompactCollector* collector_;
1436 };
1437
1438
1439 // Helper class for pruning the string table.
1440 template <bool finalize_external_strings, bool record_slots>
1441 class StringTableCleaner : public ObjectVisitor {
1442 public:
1443 StringTableCleaner(Heap* heap, HeapObject* table)
1444 : heap_(heap), pointers_removed_(0), table_(table) {
1445 DCHECK(!record_slots || table != nullptr);
1446 }
1447
1448 void VisitPointers(Object** start, Object** end) override {
1449 // Visit all HeapObject pointers in [start, end).
1450 MarkCompactCollector* collector = heap_->mark_compact_collector();
1451 for (Object** p = start; p < end; p++) {
1452 Object* o = *p;
1453 if (o->IsHeapObject()) {
1454 if (Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(o)))) {
1455 if (finalize_external_strings) {
1456 DCHECK(o->IsExternalString());
1457 heap_->FinalizeExternalString(String::cast(*p));
1458 } else {
1459 pointers_removed_++;
1460 }
1461 // Set the entry to the_hole_value (as deleted).
1462 *p = heap_->the_hole_value();
1463 } else if (record_slots) {
1464 // StringTable contains only old space strings.
1465 DCHECK(!heap_->InNewSpace(o));
1466 collector->RecordSlot(table_, p, o);
1467 }
1468 }
1469 }
1470 }
1471
1472 int PointersRemoved() {
1473 DCHECK(!finalize_external_strings);
1474 return pointers_removed_;
1475 }
1476
1477 private:
1478 Heap* heap_;
1479 int pointers_removed_;
1480 HeapObject* table_;
1481 };
1482
1483 typedef StringTableCleaner<false, true> InternalizedStringTableCleaner;
1484 typedef StringTableCleaner<true, false> ExternalStringTableCleaner;
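// The internalized-string cleaner (record_slots == true) records slots for
// surviving entries since the string table lives in old space, while the
// external-string cleaner (finalize_external_strings == true) releases the
// external resources of unreached strings instead of counting removals.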
1485
1486 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1487 // are retained.
1488 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1489 public:
1490 virtual Object* RetainAs(Object* object) {
1491 MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(object));
1492 DCHECK(!Marking::IsGrey(mark_bit));
1493 if (Marking::IsBlack(mark_bit)) {
1494 return object;
1495 } else if (object->IsAllocationSite() &&
1496 !(AllocationSite::cast(object)->IsZombie())) {
1497 // "dead" AllocationSites need to live long enough for a traversal of new
1498 // space. These sites get a one-time reprieve.
1499 AllocationSite* site = AllocationSite::cast(object);
1500 site->MarkZombie();
1501 site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
1502 return object;
1503 } else {
1504 return NULL;
1505 }
1506 }
1507 };
1508
1509
1510 // Fill the marking stack with overflowed objects returned by the given
1511 // iterator. Stop when the marking stack is filled or the end of the space
1512 // is reached, whichever comes first.
1513 template <class T>
1514 void MarkCompactCollector::DiscoverGreyObjectsWithIterator(T* it) {
1515 // The caller should ensure that the marking stack is initially not full,
1516 // so that we don't waste effort pointlessly scanning for objects.
1517 DCHECK(!marking_deque()->IsFull());
1518
1519 Map* filler_map = heap()->one_pointer_filler_map();
1520 for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
1521 MarkBit markbit = Marking::MarkBitFrom(object);
1522 if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
1523 Marking::GreyToBlack(markbit);
1524 PushBlack(object);
1525 if (marking_deque()->IsFull()) return;
1526 }
1527 }
1528 }
1529
1530 void MarkCompactCollector::DiscoverGreyObjectsOnPage(MemoryChunk* p) {
1531 DCHECK(!marking_deque()->IsFull());
1532 LiveObjectIterator<kGreyObjects> it(p);
1533 HeapObject* object = NULL;
1534 while ((object = it.Next()) != NULL) {
1535 MarkBit markbit = Marking::MarkBitFrom(object);
1536 DCHECK(Marking::IsGrey(markbit));
1537 Marking::GreyToBlack(markbit);
1538 PushBlack(object);
1539 if (marking_deque()->IsFull()) return;
1540 }
1541 }
1542
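// Visitor that re-records the remembered-set and typed (relocation) slots of
// an object after it has been migrated, so that the slot sets stay consistent
// with the object's new location.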
1543 class RecordMigratedSlotVisitor final : public ObjectVisitor {
1544 public:
1545 explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1546 : collector_(collector) {}
1547
1548 inline void VisitPointer(Object** p) final {
1549 RecordMigratedSlot(*p, reinterpret_cast<Address>(p));
1550 }
1551
1552 inline void VisitPointers(Object** start, Object** end) final {
1553 while (start < end) {
1554 RecordMigratedSlot(*start, reinterpret_cast<Address>(start));
1555 ++start;
1556 }
1557 }
1558
1559 inline void VisitCodeEntry(Address code_entry_slot) final {
1560 Address code_entry = Memory::Address_at(code_entry_slot);
1561 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
1562 RememberedSet<OLD_TO_OLD>::InsertTyped(Page::FromAddress(code_entry_slot),
1563 nullptr, CODE_ENTRY_SLOT,
1564 code_entry_slot);
1565 }
1566 }
1567
1568 inline void VisitCodeTarget(RelocInfo* rinfo) final {
1569 DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
1570 Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1571 Code* host = rinfo->host();
1572 collector_->RecordRelocSlot(host, rinfo, target);
1573 }
1574
1575 inline void VisitDebugTarget(RelocInfo* rinfo) final {
1576 DCHECK(RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
1577 rinfo->IsPatchedDebugBreakSlotSequence());
1578 Code* target = Code::GetCodeFromTargetAddress(rinfo->debug_call_address());
1579 Code* host = rinfo->host();
1580 collector_->RecordRelocSlot(host, rinfo, target);
1581 }
1582
1583 inline void VisitEmbeddedPointer(RelocInfo* rinfo) final {
1584 DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1585 HeapObject* object = HeapObject::cast(rinfo->target_object());
1586 Code* host = rinfo->host();
1587 collector_->RecordRelocSlot(host, rinfo, object);
1588 }
1589
1590 inline void VisitCell(RelocInfo* rinfo) final {
1591 DCHECK(rinfo->rmode() == RelocInfo::CELL);
1592 Cell* cell = rinfo->target_cell();
1593 Code* host = rinfo->host();
1594 collector_->RecordRelocSlot(host, rinfo, cell);
1595 }
1596
1597 // Entries that will never move.
1598 inline void VisitCodeAgeSequence(RelocInfo* rinfo) final {
1599 DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
1600 Code* stub = rinfo->code_age_stub();
1601 USE(stub);
1602 DCHECK(!Page::FromAddress(stub->address())->IsEvacuationCandidate());
1603 }
1604
1605 // Entries that are skipped for recording.
1606 inline void VisitExternalReference(RelocInfo* rinfo) final {}
1607 inline void VisitExternalReference(Address* p) final {}
1608 inline void VisitRuntimeEntry(RelocInfo* rinfo) final {}
1609 inline void VisitExternalOneByteString(
1610 v8::String::ExternalOneByteStringResource** resource) final {}
1611 inline void VisitExternalTwoByteString(
1612 v8::String::ExternalStringResource** resource) final {}
1613 inline void VisitInternalReference(RelocInfo* rinfo) final {}
1614 inline void VisitEmbedderReference(Object** p, uint16_t class_id) final {}
1615
1616 private:
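// Decides which remembered set a migrated slot belongs to: slots that now
// point into new space go into OLD_TO_NEW, slots that point onto an
// evacuation candidate go into OLD_TO_OLD. Both sets are keyed by the page
// containing the slot itself, not the page of the target object.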
1617 inline void RecordMigratedSlot(Object* value, Address slot) {
1618 if (value->IsHeapObject()) {
1619 Page* p = Page::FromAddress(reinterpret_cast<Address>(value));
1620 if (p->InNewSpace()) {
1621 RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot), slot);
1622 } else if (p->IsEvacuationCandidate()) {
1623 RememberedSet<OLD_TO_OLD>::Insert(Page::FromAddress(slot), slot);
1624 }
1625 }
1626 }
1627
1628 MarkCompactCollector* collector_;
1629 };
1630
1631 class MarkCompactCollector::HeapObjectVisitor {
1632 public:
1633 virtual ~HeapObjectVisitor() {}
1634 virtual bool Visit(HeapObject* object) = 0;
1635 };
1636
1637 class MarkCompactCollector::EvacuateVisitorBase
1638 : public MarkCompactCollector::HeapObjectVisitor {
1639 protected:
1640 enum MigrationMode { kFast, kProfiled };
1641
1642 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces)
1643 : heap_(heap),
1644 compaction_spaces_(compaction_spaces),
1645 profiling_(
1646 heap->isolate()->is_profiling() ||
1647 heap->isolate()->logger()->is_logging_code_events() ||
1648 heap->isolate()->heap_profiler()->is_tracking_object_moves()) {}
1649
1650 inline bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1651 HeapObject** target_object) {
1652 #ifdef VERIFY_HEAP
1653 if (AbortCompactionForTesting(object)) return false;
1654 #endif // VERIFY_HEAP
1655 int size = object->Size();
1656 AllocationAlignment alignment = object->RequiredAlignment();
1657 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1658 if (allocation.To(target_object)) {
1659 MigrateObject(*target_object, object, size, target_space->identity());
1660 return true;
1661 }
1662 return false;
1663 }
1664
1665 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1666 AllocationSpace dest) {
1667 if (profiling_) {
1668 MigrateObject<kProfiled>(dst, src, size, dest);
1669 } else {
1670 MigrateObject<kFast>(dst, src, size, dest);
1671 }
1672 }
1673
1674 template <MigrationMode mode>
1675 inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1676 AllocationSpace dest) {
1677 Address dst_addr = dst->address();
1678 Address src_addr = src->address();
1679 DCHECK(heap_->AllowedToBeMigrated(src, dest));
1680 DCHECK(dest != LO_SPACE);
1681 if (dest == OLD_SPACE) {
1682 DCHECK_OBJECT_SIZE(size);
1683 DCHECK(IsAligned(size, kPointerSize));
1684 heap_->CopyBlock(dst_addr, src_addr, size);
1685 if ((mode == kProfiled) && FLAG_ignition && dst->IsBytecodeArray()) {
1686 PROFILE(heap_->isolate(),
1687 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1688 }
1689 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1690 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1691 } else if (dest == CODE_SPACE) {
1692 DCHECK_CODEOBJECT_SIZE(size, heap_->code_space());
1693 if (mode == kProfiled) {
1694 PROFILE(heap_->isolate(),
1695 CodeMoveEvent(AbstractCode::cast(src), dst_addr));
1696 }
1697 heap_->CopyBlock(dst_addr, src_addr, size);
1698 Code::cast(dst)->Relocate(dst_addr - src_addr);
1699 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1700 dst->IterateBodyFast(dst->map()->instance_type(), size, &visitor);
1701 } else {
1702 DCHECK_OBJECT_SIZE(size);
1703 DCHECK(dest == NEW_SPACE);
1704 heap_->CopyBlock(dst_addr, src_addr, size);
1705 }
1706 if (mode == kProfiled) {
1707 heap_->OnMoveEvent(dst, src, size);
1708 }
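// Leave a forwarding address in the first word of the evacuated object.
// UpdateSlot() further down follows it via map_word.ToForwardingAddress()
// when fixing up stale pointers.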
1709 Memory::Address_at(src_addr) = dst_addr;
1710 }
1711
1712 #ifdef VERIFY_HEAP
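// Under --stress-compaction, aborts evacuation for objects whose in-page
// offset matches a mask derived from --random-seed. The per-page flag
// toggles, so the first matching object on a page is aborted and the next
// one is allowed through again.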
1713 bool AbortCompactionForTesting(HeapObject* object) {
1714 if (FLAG_stress_compaction) {
1715 const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1716 Page::kPageAlignmentMask & ~kPointerAlignmentMask;
1717 if ((reinterpret_cast<uintptr_t>(object->address()) &
1718 Page::kPageAlignmentMask) == mask) {
1719 Page* page = Page::FromAddress(object->address());
1720 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1721 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1722 } else {
1723 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1724 return true;
1725 }
1726 }
1727 }
1728 return false;
1729 }
1730 #endif // VERIFY_HEAP
1731
1732 Heap* heap_;
1733 CompactionSpaceCollection* compaction_spaces_;
1734 bool profiling_;
1735 };
1736
1737 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1738 : public MarkCompactCollector::EvacuateVisitorBase {
1739 public:
1740 static const intptr_t kLabSize = 4 * KB;
1741 static const intptr_t kMaxLabObjectSize = 256;
1742
1743 explicit EvacuateNewSpaceVisitor(Heap* heap,
1744 CompactionSpaceCollection* compaction_spaces,
1745 base::HashMap* local_pretenuring_feedback)
1746 : EvacuateVisitorBase(heap, compaction_spaces),
1747 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1748 space_to_allocate_(NEW_SPACE),
1749 promoted_size_(0),
1750 semispace_copied_size_(0),
1751 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1752
1753 inline bool Visit(HeapObject* object) override {
1754 heap_->UpdateAllocationSite<Heap::kCached>(object,
1755 local_pretenuring_feedback_);
1756 int size = object->Size();
1757 HeapObject* target_object = nullptr;
1758 if (heap_->ShouldBePromoted<DEFAULT_PROMOTION>(object->address(), size) &&
1759 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1760 &target_object)) {
1761 promoted_size_ += size;
1762 return true;
1763 }
1764 HeapObject* target = nullptr;
1765 AllocationSpace space = AllocateTargetObject(object, &target);
1766 MigrateObject(HeapObject::cast(target), object, size, space);
1767 semispace_copied_size_ += size;
1768 return true;
1769 }
1770
1771 intptr_t promoted_size() { return promoted_size_; }
1772 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1773
1774 private:
1775 enum NewSpaceAllocationMode {
1776 kNonstickyBailoutOldSpace,
1777 kStickyBailoutOldSpace,
1778 };
1779
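// Allocation strategy for semispace copies: objects larger than
// kMaxLabObjectSize allocate directly in new space, smaller ones go through a
// local allocation buffer (LAB). If new space is exhausted (or a LAB cannot be
// obtained), allocation falls back to old space; failing there is fatal (see
// AllocateInOldSpace()).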
1780 inline AllocationSpace AllocateTargetObject(HeapObject* old_object,
1781 HeapObject** target_object) {
1782 const int size = old_object->Size();
1783 AllocationAlignment alignment = old_object->RequiredAlignment();
1784 AllocationResult allocation;
1785 AllocationSpace space_allocated_in = space_to_allocate_;
1786 if (space_to_allocate_ == NEW_SPACE) {
1787 if (size > kMaxLabObjectSize) {
1788 allocation =
1789 AllocateInNewSpace(size, alignment, kNonstickyBailoutOldSpace);
1790 } else {
1791 allocation = AllocateInLab(size, alignment);
1792 }
1793 }
1794 if (allocation.IsRetry() || (space_to_allocate_ == OLD_SPACE)) {
1795 allocation = AllocateInOldSpace(size, alignment);
1796 space_allocated_in = OLD_SPACE;
1797 }
1798 bool ok = allocation.To(target_object);
1799 DCHECK(ok);
1800 USE(ok);
1801 return space_allocated_in;
1802 }
1803
1804 inline bool NewLocalAllocationBuffer() {
1805 AllocationResult result =
1806 AllocateInNewSpace(kLabSize, kWordAligned, kStickyBailoutOldSpace);
1807 LocalAllocationBuffer saved_old_buffer = buffer_;
1808 buffer_ = LocalAllocationBuffer::FromResult(heap_, result, kLabSize);
1809 if (buffer_.IsValid()) {
1810 buffer_.TryMerge(&saved_old_buffer);
1811 return true;
1812 }
1813 return false;
1814 }
1815
1816 inline AllocationResult AllocateInNewSpace(int size_in_bytes,
1817 AllocationAlignment alignment,
1818 NewSpaceAllocationMode mode) {
1819 AllocationResult allocation =
1820 heap_->new_space()->AllocateRawSynchronized(size_in_bytes, alignment);
1821 if (allocation.IsRetry()) {
1822 if (!heap_->new_space()->AddFreshPageSynchronized()) {
1823 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1824 } else {
1825 allocation = heap_->new_space()->AllocateRawSynchronized(size_in_bytes,
1826 alignment);
1827 if (allocation.IsRetry()) {
1828 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1829 }
1830 }
1831 }
1832 return allocation;
1833 }
1834
1835 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1836 AllocationAlignment alignment) {
1837 AllocationResult allocation =
1838 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes,
1839 alignment);
1840 if (allocation.IsRetry()) {
1841 v8::internal::Heap::FatalProcessOutOfMemory(
1842 "MarkCompactCollector: semi-space copy, fallback in old gen", true);
1843 }
1844 return allocation;
1845 }
1846
1847 inline AllocationResult AllocateInLab(int size_in_bytes,
1848 AllocationAlignment alignment) {
1849 AllocationResult allocation;
1850 if (!buffer_.IsValid()) {
1851 if (!NewLocalAllocationBuffer()) {
1852 space_to_allocate_ = OLD_SPACE;
1853 return AllocationResult::Retry(OLD_SPACE);
1854 }
1855 }
1856 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1857 if (allocation.IsRetry()) {
1858 if (!NewLocalAllocationBuffer()) {
1859 space_to_allocate_ = OLD_SPACE;
1860 return AllocationResult::Retry(OLD_SPACE);
1861 } else {
1862 allocation = buffer_.AllocateRawAligned(size_in_bytes, alignment);
1863 if (allocation.IsRetry()) {
1864 space_to_allocate_ = OLD_SPACE;
1865 return AllocationResult::Retry(OLD_SPACE);
1866 }
1867 }
1868 }
1869 return allocation;
1870 }
1871
1872 LocalAllocationBuffer buffer_;
1873 AllocationSpace space_to_allocate_;
1874 intptr_t promoted_size_;
1875 intptr_t semispace_copied_size_;
1876 base::HashMap* local_pretenuring_feedback_;
1877 };
1878
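// Visitor for new-space pages that are promoted wholesale (see
// MoveToOldSpace()/MoveToToSpace()): objects are not copied, so Visit() only
// re-records outgoing slots and accounts the visited object as promoted.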
1879 class MarkCompactCollector::EvacuateNewSpacePageVisitor final
1880 : public MarkCompactCollector::HeapObjectVisitor {
1881 public:
1882 explicit EvacuateNewSpacePageVisitor(Heap* heap)
1883 : heap_(heap), promoted_size_(0), semispace_copied_size_(0) {}
1884
1885 static void MoveToOldSpace(Page* page, PagedSpace* owner) {
1886 page->Unlink();
1887 Page* new_page = Page::ConvertNewToOld(page, owner);
1888 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1889 }
1890
1891 static void MoveToToSpace(Page* page) {
1892 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1893 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1894 }
1895
1896 inline bool Visit(HeapObject* object) {
1897 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1898 object->IterateBodyFast(&visitor);
1899 promoted_size_ += object->Size();
1900 return true;
1901 }
1902
1903 intptr_t promoted_size() { return promoted_size_; }
1904 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1905
1906 void account_semispace_copied(intptr_t copied) {
1907 semispace_copied_size_ += copied;
1908 }
1909
1910 private:
1911 Heap* heap_;
1912 intptr_t promoted_size_;
1913 intptr_t semispace_copied_size_;
1914 };
1915
1916 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1917 : public MarkCompactCollector::EvacuateVisitorBase {
1918 public:
1919 EvacuateOldSpaceVisitor(Heap* heap,
1920 CompactionSpaceCollection* compaction_spaces)
1921 : EvacuateVisitorBase(heap, compaction_spaces) {}
1922
1923 inline bool Visit(HeapObject* object) override {
1924 CompactionSpace* target_space = compaction_spaces_->Get(
1925 Page::FromAddress(object->address())->owner()->identity());
1926 HeapObject* target_object = nullptr;
1927 if (TryEvacuateObject(target_space, object, &target_object)) {
1928 DCHECK(object->map_word().IsForwardingAddress());
1929 return true;
1930 }
1931 return false;
1932 }
1933 };
1934
1935 class MarkCompactCollector::EvacuateRecordOnlyVisitor final
1936 : public MarkCompactCollector::HeapObjectVisitor {
1937 public:
1938 explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1939
1940 inline bool Visit(HeapObject* object) {
1941 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1942 object->IterateBody(&visitor);
1943 return true;
1944 }
1945
1946 private:
1947 Heap* heap_;
1948 };
1949
1950 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1951 for (Page* p : *space) {
1952 if (!p->IsFlagSet(Page::BLACK_PAGE)) {
1953 DiscoverGreyObjectsOnPage(p);
1954 }
1955 if (marking_deque()->IsFull()) return;
1956 }
1957 }
1958
1959
1960 void MarkCompactCollector::DiscoverGreyObjectsInNewSpace() {
1961 NewSpace* space = heap()->new_space();
1962 for (Page* page : NewSpacePageRange(space->bottom(), space->top())) {
1963 DiscoverGreyObjectsOnPage(page);
1964 if (marking_deque()->IsFull()) return;
1965 }
1966 }
1967
1968
1969 bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
1970 Object* o = *p;
1971 if (!o->IsHeapObject()) return false;
1972 HeapObject* heap_object = HeapObject::cast(o);
1973 MarkBit mark = Marking::MarkBitFrom(heap_object);
1974 return Marking::IsWhite(mark);
1975 }
1976
1977
1978 bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
1979 Object** p) {
1980 Object* o = *p;
1981 DCHECK(o->IsHeapObject());
1982 HeapObject* heap_object = HeapObject::cast(o);
1983 MarkBit mark = Marking::MarkBitFrom(heap_object);
1984 return Marking::IsWhite(mark);
1985 }
1986
1987
1988 void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
1989 StringTable* string_table = heap()->string_table();
1990 // Mark the string table itself.
1991 MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
1992 if (Marking::IsWhite(string_table_mark)) {
1993 // The string table could have already been marked by visiting the handles list.
1994 SetMark(string_table, string_table_mark);
1995 }
1996 // Explicitly mark the prefix.
1997 string_table->IteratePrefix(visitor);
1998 ProcessMarkingDeque();
1999 }
2000
2001
2002 void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
2003 MarkBit mark_bit = Marking::MarkBitFrom(site);
2004 SetMark(site, mark_bit);
2005 }
2006
2007
2008 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
2009 // Mark the heap roots including global variables, stack variables,
2010 // etc., and all objects reachable from them.
2011 heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
2012
2013 // Handle the string table specially.
2014 MarkStringTable(visitor);
2015
2016 // There may be overflowed objects in the heap. Visit them now.
2017 while (marking_deque_.overflowed()) {
2018 RefillMarkingDeque();
2019 EmptyMarkingDeque();
2020 }
2021 }
2022
2023
2024 void MarkCompactCollector::MarkImplicitRefGroups(
2025 MarkObjectFunction mark_object) {
2026 List<ImplicitRefGroup*>* ref_groups =
2027 isolate()->global_handles()->implicit_ref_groups();
2028
2029 int last = 0;
2030 for (int i = 0; i < ref_groups->length(); i++) {
2031 ImplicitRefGroup* entry = ref_groups->at(i);
2032 DCHECK(entry != NULL);
2033
2034 if (!IsMarked(*entry->parent)) {
2035 (*ref_groups)[last++] = entry;
2036 continue;
2037 }
2038
2039 Object*** children = entry->children;
2040 // A parent object is marked, so mark all child heap objects.
2041 for (size_t j = 0; j < entry->length; ++j) {
2042 if ((*children[j])->IsHeapObject()) {
2043 mark_object(heap(), HeapObject::cast(*children[j]));
2044 }
2045 }
2046
2047 // Once the entire group has been marked, dispose of it because it's
2048 // not needed anymore.
2049 delete entry;
2050 }
2051 ref_groups->Rewind(last);
2052 }
2053
2054
2055 // Mark all objects reachable from the objects on the marking stack.
2056 // Before: the marking stack contains zero or more heap object pointers.
2057 // After: the marking stack is empty, and all objects reachable from the
2058 // marking stack have been marked, or are overflowed in the heap.
2059 void MarkCompactCollector::EmptyMarkingDeque() {
2060 Map* filler_map = heap_->one_pointer_filler_map();
2061 while (!marking_deque_.IsEmpty()) {
2062 HeapObject* object = marking_deque_.Pop();
2063 // Explicitly skip one word fillers. Incremental markbit patterns are
2064 // correct only for objects that occupy at least two words.
2065 Map* map = object->map();
2066 if (map == filler_map) continue;
2067
2068 DCHECK(object->IsHeapObject());
2069 DCHECK(heap()->Contains(object));
2070 DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
2071
2072 MarkBit map_mark = Marking::MarkBitFrom(map);
2073 MarkObject(map, map_mark);
2074
2075 MarkCompactMarkingVisitor::IterateBody(map, object);
2076 }
2077 }
2078
2079
2080 // Sweep the heap for overflowed objects, clear their overflow bits, and
2081 // push them on the marking stack. Stop early if the marking stack fills
2082 // before sweeping completes. If sweeping completes, there are no remaining
2083 // overflowed objects in the heap so the overflow flag on the marking stack
2084 // is cleared.
2085 void MarkCompactCollector::RefillMarkingDeque() {
2086 isolate()->CountUsage(v8::Isolate::UseCounterFeature::kMarkDequeOverflow);
2087 DCHECK(marking_deque_.overflowed());
2088
2089 DiscoverGreyObjectsInNewSpace();
2090 if (marking_deque_.IsFull()) return;
2091
2092 DiscoverGreyObjectsInSpace(heap()->old_space());
2093 if (marking_deque_.IsFull()) return;
2094
2095 DiscoverGreyObjectsInSpace(heap()->code_space());
2096 if (marking_deque_.IsFull()) return;
2097
2098 DiscoverGreyObjectsInSpace(heap()->map_space());
2099 if (marking_deque_.IsFull()) return;
2100
2101 LargeObjectIterator lo_it(heap()->lo_space());
2102 DiscoverGreyObjectsWithIterator(&lo_it);
2103 if (marking_deque_.IsFull()) return;
2104
2105 marking_deque_.ClearOverflowed();
2106 }
2107
2108
2109 // Mark all objects reachable (transitively) from objects on the marking
2110 // stack. Before: the marking stack contains zero or more heap object
2111 // pointers. After: the marking stack is empty and there are no overflowed
2112 // objects in the heap.
2113 void MarkCompactCollector::ProcessMarkingDeque() {
2114 EmptyMarkingDeque();
2115 while (marking_deque_.overflowed()) {
2116 RefillMarkingDeque();
2117 EmptyMarkingDeque();
2118 }
2119 }
2120
2121 // Mark all objects reachable (transitively) from objects on the marking
2122 // stack including references only considered in the atomic marking pause.
2123 void MarkCompactCollector::ProcessEphemeralMarking(
2124 ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
2125 DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
2126 bool work_to_do = true;
2127 while (work_to_do) {
2128 if (UsingEmbedderHeapTracer()) {
2129 RegisterWrappersWithEmbedderHeapTracer();
2130 embedder_heap_tracer()->AdvanceTracing(
2131 0, EmbedderHeapTracer::AdvanceTracingActions(
2132 EmbedderHeapTracer::ForceCompletionAction::FORCE_COMPLETION));
2133 }
2134 if (!only_process_harmony_weak_collections) {
2135 isolate()->global_handles()->IterateObjectGroups(
2136 visitor, &IsUnmarkedHeapObjectWithHeap);
2137 MarkImplicitRefGroups(&MarkCompactMarkingVisitor::MarkObject);
2138 }
2139 ProcessWeakCollections();
2140 work_to_do = !marking_deque_.IsEmpty();
2141 ProcessMarkingDeque();
2142 }
2143 }
2144
2145 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
2146 for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
2147 !it.done(); it.Advance()) {
2148 if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
2149 return;
2150 }
2151 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2152 Code* code = it.frame()->LookupCode();
2153 if (!code->CanDeoptAt(it.frame()->pc())) {
2154 Code::BodyDescriptor::IterateBody(code, visitor);
2155 }
2156 ProcessMarkingDeque();
2157 return;
2158 }
2159 }
2160 }
2161
2162
2163 void MarkCompactCollector::EnsureMarkingDequeIsReserved() {
2164 DCHECK(!marking_deque_.in_use());
2165 if (marking_deque_memory_ == NULL) {
2166 marking_deque_memory_ = new base::VirtualMemory(kMaxMarkingDequeSize);
2167 marking_deque_memory_committed_ = 0;
2168 }
2169 if (marking_deque_memory_ == NULL) {
2170 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsReserved");
2171 }
2172 }
2173
2174
2175 void MarkCompactCollector::EnsureMarkingDequeIsCommitted(size_t max_size) {
2176 // If the marking deque is too small, we try to allocate a bigger one.
2177 // If that fails, make do with a smaller one.
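// For example, if committing max_size fails, the loop below retries with
// max_size / 2, max_size / 4, ..., stopping at kMinMarkingDequeSize before
// reporting OOM.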
2178 CHECK(!marking_deque_.in_use());
2179 for (size_t size = max_size; size >= kMinMarkingDequeSize; size >>= 1) {
2180 base::VirtualMemory* memory = marking_deque_memory_;
2181 size_t currently_committed = marking_deque_memory_committed_;
2182
2183 if (currently_committed == size) return;
2184
2185 if (currently_committed > size) {
2186 bool success = marking_deque_memory_->Uncommit(
2187 reinterpret_cast<Address>(marking_deque_memory_->address()) + size,
2188 currently_committed - size);
2189 if (success) {
2190 marking_deque_memory_committed_ = size;
2191 return;
2192 }
2193 UNREACHABLE();
2194 }
2195
2196 bool success = memory->Commit(
2197 reinterpret_cast<Address>(memory->address()) + currently_committed,
2198 size - currently_committed,
2199 false); // Not executable.
2200 if (success) {
2201 marking_deque_memory_committed_ = size;
2202 return;
2203 }
2204 }
2205 V8::FatalProcessOutOfMemory("EnsureMarkingDequeIsCommitted");
2206 }
2207
2208
2209 void MarkCompactCollector::InitializeMarkingDeque() {
2210 DCHECK(!marking_deque_.in_use());
2211 DCHECK(marking_deque_memory_committed_ > 0);
2212 Address addr = static_cast<Address>(marking_deque_memory_->address());
2213 size_t size = marking_deque_memory_committed_;
2214 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
2215 marking_deque_.Initialize(addr, addr + size);
2216 }
2217
2218
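// The deque occupies the committed region as an array of HeapObject*
// entries whose usable capacity is rounded down to a power of two, so mask_
// can be used to wrap top_/bottom_. For example, assuming 8-byte pointers, a
// 64 KB region yields 8192 entries and mask_ == 8191.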
2219 void MarkingDeque::Initialize(Address low, Address high) {
2220 DCHECK(!in_use_);
2221 HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
2222 HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
2223 array_ = obj_low;
2224 mask_ = base::bits::RoundDownToPowerOfTwo32(
2225 static_cast<uint32_t>(obj_high - obj_low)) -
2226 1;
2227 top_ = bottom_ = 0;
2228 overflowed_ = false;
2229 in_use_ = true;
2230 }
2231
2232
2233 void MarkingDeque::Uninitialize(bool aborting) {
2234 if (!aborting) {
2235 DCHECK(IsEmpty());
2236 DCHECK(!overflowed_);
2237 }
2238 DCHECK(in_use_);
2239 top_ = bottom_ = 0xdecbad;
2240 in_use_ = false;
2241 }
2242
2243 void MarkCompactCollector::SetEmbedderHeapTracer(EmbedderHeapTracer* tracer) {
2244 DCHECK_NOT_NULL(tracer);
2245 CHECK_NULL(embedder_heap_tracer_);
2246 embedder_heap_tracer_ = tracer;
2247 }
2248
2249 void MarkCompactCollector::RegisterWrappersWithEmbedderHeapTracer() {
2250 DCHECK(UsingEmbedderHeapTracer());
2251 if (wrappers_to_trace_.empty()) {
2252 return;
2253 }
2254 embedder_heap_tracer()->RegisterV8References(wrappers_to_trace_);
2255 wrappers_to_trace_.clear();
2256 }
2257
2258 void MarkCompactCollector::TracePossibleWrapper(JSObject* js_object) {
2259 DCHECK(js_object->WasConstructedFromApiFunction());
2260 if (js_object->GetInternalFieldCount() >= 2 &&
2261 js_object->GetInternalField(0) &&
2262 js_object->GetInternalField(0) != heap_->undefined_value() &&
2263 js_object->GetInternalField(1) != heap_->undefined_value()) {
2264 DCHECK(reinterpret_cast<intptr_t>(js_object->GetInternalField(0)) % 2 == 0);
2265 wrappers_to_trace_.push_back(std::pair<void*, void*>(
2266 reinterpret_cast<void*>(js_object->GetInternalField(0)),
2267 reinterpret_cast<void*>(js_object->GetInternalField(1))));
2268 }
2269 }
2270
2271 void MarkCompactCollector::RegisterExternallyReferencedObject(Object** object) {
2272 DCHECK(in_use());
2273 HeapObject* heap_object = HeapObject::cast(*object);
2274 DCHECK(heap_->Contains(heap_object));
2275 MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
2276 MarkObject(heap_object, mark_bit);
2277 }
2278
2279 void MarkCompactCollector::MarkLiveObjects() {
2280 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2281 double start_time = 0.0;
2282 if (FLAG_print_cumulative_gc_stat) {
2283 start_time = heap_->MonotonicallyIncreasingTimeInMs();
2284 }
2285 // The recursive GC marker detects when it is nearing stack overflow,
2286 // and switches to a different marking system. JS interrupts interfere
2287 // with the C stack limit check.
2288 PostponeInterruptsScope postpone(isolate());
2289
2290 {
2291 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2292 IncrementalMarking* incremental_marking = heap_->incremental_marking();
2293 if (was_marked_incrementally_) {
2294 incremental_marking->Finalize();
2295 } else {
2296 // Abort any pending incremental activities e.g. incremental sweeping.
2297 incremental_marking->Stop();
2298 if (FLAG_track_gc_object_stats) {
2299 // Clear object stats collected during incremental marking.
2300 heap()->object_stats_->ClearObjectStats();
2301 }
2302 if (marking_deque_.in_use()) {
2303 marking_deque_.Uninitialize(true);
2304 }
2305 }
2306 }
2307
2308 #ifdef DEBUG
2309 DCHECK(state_ == PREPARE_GC);
2310 state_ = MARK_LIVE_OBJECTS;
2311 #endif
2312
2313 EnsureMarkingDequeIsCommittedAndInitialize(
2314 MarkCompactCollector::kMaxMarkingDequeSize);
2315
2316 {
2317 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_PREPARE_CODE_FLUSH);
2318 PrepareForCodeFlushing();
2319 }
2320
2321 RootMarkingVisitor root_visitor(heap());
2322
2323 {
2324 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2325 MarkRoots(&root_visitor);
2326 ProcessTopOptimizedFrame(&root_visitor);
2327 }
2328
2329 {
2330 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2331
2332 // The objects reachable from the roots are marked, yet unreachable
2333 // objects are unmarked. Mark objects reachable due to host
2334 // application specific logic or through Harmony weak maps.
2335 {
2336 TRACE_GC(heap()->tracer(),
2337 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERAL);
2338 ProcessEphemeralMarking(&root_visitor, false);
2339 }
2340
2341 // The objects reachable from the roots, weak maps or object groups
2342 // are marked. Objects pointed to only by weak global handles cannot be
2343 // immediately reclaimed. Instead, we have to mark them as pending and mark
2344 // objects reachable from them.
2345 //
2346 // First we identify nonlive weak handles and mark them as pending
2347 // destruction.
2348 {
2349 TRACE_GC(heap()->tracer(),
2350 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2351 heap()->isolate()->global_handles()->IdentifyWeakHandles(
2352 &IsUnmarkedHeapObject);
2353 ProcessMarkingDeque();
2354 }
2355 // Then we mark the objects.
2356
2357 {
2358 TRACE_GC(heap()->tracer(),
2359 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2360 heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
2361 ProcessMarkingDeque();
2362 }
2363
2364 // Repeat Harmony weak maps marking to mark unmarked objects reachable from
2365 // the weak roots we just marked as pending destruction.
2366 //
2367 // We only process harmony collections, as all object groups have been fully
2368 // processed and no weakly reachable node can discover new object groups.
2369 {
2370 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2371 ProcessEphemeralMarking(&root_visitor, true);
2372 if (UsingEmbedderHeapTracer()) {
2373 embedder_heap_tracer()->TraceEpilogue();
2374 }
2375 }
2376 }
2377
2378 if (FLAG_print_cumulative_gc_stat) {
2379 heap_->tracer()->AddMarkingTime(heap_->MonotonicallyIncreasingTimeInMs() -
2380 start_time);
2381 }
2382 if (FLAG_track_gc_object_stats) {
2383 if (FLAG_trace_gc_object_stats) {
2384 heap()->object_stats_->TraceObjectStats();
2385 }
2386 heap()->object_stats_->CheckpointObjectStats();
2387 }
2388 }
2389
2390
2391 void MarkCompactCollector::ClearNonLiveReferences() {
2392 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2393
2394 {
2395 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2396
2397 // Prune the string table removing all strings only pointed to by the
2398 // string table. Cannot use string_table() here because the string
2399 // table is marked.
2400 StringTable* string_table = heap()->string_table();
2401 InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
2402 string_table->IterateElements(&internalized_visitor);
2403 string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
2404
2405 ExternalStringTableCleaner external_visitor(heap(), nullptr);
2406 heap()->external_string_table_.Iterate(&external_visitor);
2407 heap()->external_string_table_.CleanUp();
2408 }
2409
2410 {
2411 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2412 // Process the weak references.
2413 MarkCompactWeakObjectRetainer mark_compact_object_retainer;
2414 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2415 }
2416
2417 {
2418 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_GLOBAL_HANDLES);
2419
2420 // Remove object groups after marking phase.
2421 heap()->isolate()->global_handles()->RemoveObjectGroups();
2422 heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
2423 }
2424
2425 // Flush code from collected candidates.
2426 if (is_code_flushing_enabled()) {
2427 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_CODE_FLUSH);
2428 code_flusher_->ProcessCandidates();
2429 }
2430
2431
2432 DependentCode* dependent_code_list;
2433 Object* non_live_map_list;
2434 ClearWeakCells(&non_live_map_list, &dependent_code_list);
2435
2436 {
2437 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2438 ClearSimpleMapTransitions(non_live_map_list);
2439 ClearFullMapTransitions();
2440 }
2441
2442 MarkDependentCodeForDeoptimization(dependent_code_list);
2443
2444 ClearWeakCollections();
2445
2446 ClearInvalidRememberedSetSlots();
2447 }
2448
2449
2450 void MarkCompactCollector::MarkDependentCodeForDeoptimization(
2451 DependentCode* list_head) {
2452 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_DEPENDENT_CODE);
2453 Isolate* isolate = this->isolate();
2454 DependentCode* current = list_head;
2455 while (current->length() > 0) {
2456 have_code_to_deoptimize_ |= current->MarkCodeForDeoptimization(
2457 isolate, DependentCode::kWeakCodeGroup);
2458 current = current->next_link();
2459 }
2460
2461 WeakHashTable* table = heap_->weak_object_to_code_table();
2462 uint32_t capacity = table->Capacity();
2463 for (uint32_t i = 0; i < capacity; i++) {
2464 uint32_t key_index = table->EntryToIndex(i);
2465 Object* key = table->get(key_index);
2466 if (!table->IsKey(isolate, key)) continue;
2467 uint32_t value_index = table->EntryToValueIndex(i);
2468 Object* value = table->get(value_index);
2469 DCHECK(key->IsWeakCell());
2470 if (WeakCell::cast(key)->cleared()) {
2471 have_code_to_deoptimize_ |=
2472 DependentCode::cast(value)->MarkCodeForDeoptimization(
2473 isolate, DependentCode::kWeakCodeGroup);
2474 table->set(key_index, heap_->the_hole_value());
2475 table->set(value_index, heap_->the_hole_value());
2476 table->ElementRemoved();
2477 }
2478 }
2479 }
2480
2481
2482 void MarkCompactCollector::ClearSimpleMapTransitions(
2483 Object* non_live_map_list) {
2484 Object* the_hole_value = heap()->the_hole_value();
2485 Object* weak_cell_obj = non_live_map_list;
2486 while (weak_cell_obj != Smi::FromInt(0)) {
2487 WeakCell* weak_cell = WeakCell::cast(weak_cell_obj);
2488 Map* map = Map::cast(weak_cell->value());
2489 DCHECK(Marking::IsWhite(Marking::MarkBitFrom(map)));
2490 Object* potential_parent = map->constructor_or_backpointer();
2491 if (potential_parent->IsMap()) {
2492 Map* parent = Map::cast(potential_parent);
2493 if (Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent)) &&
2494 parent->raw_transitions() == weak_cell) {
2495 ClearSimpleMapTransition(parent, map);
2496 }
2497 }
2498 weak_cell->clear();
2499 weak_cell_obj = weak_cell->next();
2500 weak_cell->clear_next(the_hole_value);
2501 }
2502 }
2503
2504
2505 void MarkCompactCollector::ClearSimpleMapTransition(Map* map,
2506 Map* dead_transition) {
2507 // A previously existing simple transition (stored in a WeakCell) is going
2508 // to be cleared. Clear the useless cell pointer, and take ownership
2509 // of the descriptor array.
2510 map->set_raw_transitions(Smi::FromInt(0));
2511 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2512 DescriptorArray* descriptors = map->instance_descriptors();
2513 if (descriptors == dead_transition->instance_descriptors() &&
2514 number_of_own_descriptors > 0) {
2515 TrimDescriptorArray(map, descriptors);
2516 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2517 map->set_owns_descriptors(true);
2518 }
2519 }
2520
2521
2522 void MarkCompactCollector::ClearFullMapTransitions() {
2523 HeapObject* undefined = heap()->undefined_value();
2524 Object* obj = heap()->encountered_transition_arrays();
2525 while (obj != Smi::FromInt(0)) {
2526 TransitionArray* array = TransitionArray::cast(obj);
2527 int num_transitions = array->number_of_entries();
2528 DCHECK_EQ(TransitionArray::NumberOfTransitions(array), num_transitions);
2529 if (num_transitions > 0) {
2530 Map* map = array->GetTarget(0);
2531 Map* parent = Map::cast(map->constructor_or_backpointer());
2532 bool parent_is_alive =
2533 Marking::IsBlackOrGrey(Marking::MarkBitFrom(parent));
2534 DescriptorArray* descriptors =
2535 parent_is_alive ? parent->instance_descriptors() : nullptr;
2536 bool descriptors_owner_died =
2537 CompactTransitionArray(parent, array, descriptors);
2538 if (descriptors_owner_died) {
2539 TrimDescriptorArray(parent, descriptors);
2540 }
2541 }
2542 obj = array->next_link();
2543 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2544 }
2545 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2546 }
2547
2548
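// Compacts a transition array in place: entries whose target map is dead
// (white) are dropped, live entries are shifted left, and the array is
// right-trimmed to the remaining number of transitions. Returns true when a
// dead target shared the parent's descriptor array, in which case the caller
// trims that array as well.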
2549 bool MarkCompactCollector::CompactTransitionArray(
2550 Map* map, TransitionArray* transitions, DescriptorArray* descriptors) {
2551 int num_transitions = transitions->number_of_entries();
2552 bool descriptors_owner_died = false;
2553 int transition_index = 0;
2554 // Compact all live transitions to the left.
2555 for (int i = 0; i < num_transitions; ++i) {
2556 Map* target = transitions->GetTarget(i);
2557 DCHECK_EQ(target->constructor_or_backpointer(), map);
2558 if (Marking::IsWhite(Marking::MarkBitFrom(target))) {
2559 if (descriptors != nullptr &&
2560 target->instance_descriptors() == descriptors) {
2561 descriptors_owner_died = true;
2562 }
2563 } else {
2564 if (i != transition_index) {
2565 Name* key = transitions->GetKey(i);
2566 transitions->SetKey(transition_index, key);
2567 Object** key_slot = transitions->GetKeySlot(transition_index);
2568 RecordSlot(transitions, key_slot, key);
2569 // Target slots do not need to be recorded since maps are not compacted.
2570 transitions->SetTarget(transition_index, transitions->GetTarget(i));
2571 }
2572 transition_index++;
2573 }
2574 }
2575 // If there are no transitions to be cleared, return.
2576 if (transition_index == num_transitions) {
2577 DCHECK(!descriptors_owner_died);
2578 return false;
2579 }
2580 // Note that we never eliminate a transition array, though we might right-trim
2581 // such that number_of_transitions() == 0. If this assumption changes,
2582 // TransitionArray::Insert() will need to deal with the case that a transition
2583 // array disappeared during GC.
2584 int trim = TransitionArray::Capacity(transitions) - transition_index;
2585 if (trim > 0) {
2586 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2587 transitions, trim * TransitionArray::kTransitionSize);
2588 transitions->SetNumberOfTransitions(transition_index);
2589 }
2590 return descriptors_owner_died;
2591 }
2592
2593
2594 void MarkCompactCollector::TrimDescriptorArray(Map* map,
2595 DescriptorArray* descriptors) {
2596 int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2597 if (number_of_own_descriptors == 0) {
2598 DCHECK(descriptors == heap_->empty_descriptor_array());
2599 return;
2600 }
2601
2602 int number_of_descriptors = descriptors->number_of_descriptors_storage();
2603 int to_trim = number_of_descriptors - number_of_own_descriptors;
2604 if (to_trim > 0) {
2605 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2606 descriptors, to_trim * DescriptorArray::kDescriptorSize);
2607 descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
2608
2609 if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
2610 descriptors->Sort();
2611
2612 if (FLAG_unbox_double_fields) {
2613 LayoutDescriptor* layout_descriptor = map->layout_descriptor();
2614 layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2615 number_of_own_descriptors);
2616 SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2617 }
2618 }
2619 DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2620 map->set_owns_descriptors(true);
2621 }
2622
2623
2624 void MarkCompactCollector::TrimEnumCache(Map* map,
2625 DescriptorArray* descriptors) {
2626 int live_enum = map->EnumLength();
2627 if (live_enum == kInvalidEnumCacheSentinel) {
2628 live_enum =
2629 map->NumberOfDescribedProperties(OWN_DESCRIPTORS, ENUMERABLE_STRINGS);
2630 }
2631 if (live_enum == 0) return descriptors->ClearEnumCache();
2632
2633 FixedArray* enum_cache = descriptors->GetEnumCache();
2634
2635 int to_trim = enum_cache->length() - live_enum;
2636 if (to_trim <= 0) return;
2637 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(
2638 descriptors->GetEnumCache(), to_trim);
2639
2640 if (!descriptors->HasEnumIndicesCache()) return;
2641 FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
2642 heap_->RightTrimFixedArray<Heap::SEQUENTIAL_TO_SWEEPER>(enum_indices_cache,
2643 to_trim);
2644 }
2645
2646
2647 void MarkCompactCollector::ProcessWeakCollections() {
2648 Object* weak_collection_obj = heap()->encountered_weak_collections();
2649 while (weak_collection_obj != Smi::FromInt(0)) {
2650 JSWeakCollection* weak_collection =
2651 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2652 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2653 if (weak_collection->table()->IsHashTable()) {
2654 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2655 for (int i = 0; i < table->Capacity(); i++) {
2656 if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
2657 Object** key_slot =
2658 table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
2659 RecordSlot(table, key_slot, *key_slot);
2660 Object** value_slot =
2661 table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
2662 MarkCompactMarkingVisitor::MarkObjectByPointer(this, table,
2663 value_slot);
2664 }
2665 }
2666 }
2667 weak_collection_obj = weak_collection->next();
2668 }
2669 }
2670
2671
2672 void MarkCompactCollector::ClearWeakCollections() {
2673 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2674 Object* weak_collection_obj = heap()->encountered_weak_collections();
2675 while (weak_collection_obj != Smi::FromInt(0)) {
2676 JSWeakCollection* weak_collection =
2677 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2678 DCHECK(MarkCompactCollector::IsMarked(weak_collection));
2679 if (weak_collection->table()->IsHashTable()) {
2680 ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
2681 for (int i = 0; i < table->Capacity(); i++) {
2682 HeapObject* key = HeapObject::cast(table->KeyAt(i));
2683 if (!MarkCompactCollector::IsMarked(key)) {
2684 table->RemoveEntry(i);
2685 }
2686 }
2687 }
2688 weak_collection_obj = weak_collection->next();
2689 weak_collection->set_next(heap()->undefined_value());
2690 }
2691 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2692 }
2693
2694
2695 void MarkCompactCollector::AbortWeakCollections() {
2696 Object* weak_collection_obj = heap()->encountered_weak_collections();
2697 while (weak_collection_obj != Smi::FromInt(0)) {
2698 JSWeakCollection* weak_collection =
2699 reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
2700 weak_collection_obj = weak_collection->next();
2701 weak_collection->set_next(heap()->undefined_value());
2702 }
2703 heap()->set_encountered_weak_collections(Smi::FromInt(0));
2704 }
2705
2706
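// Walks the encountered WeakCell list and distinguishes three cases:
// - the value is still marked: keep the cell and record its value slot;
// - the value is a dead Cell whose own value is alive: resurrect the Cell
//   (these come from Heap::weak_object_to_code_table, see the TODO below);
// - the value is a dead Map: keep the weak cell on the non_live_map list and
//   collect its weak-code dependent code.
// All other dead values are cleared.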
2707 void MarkCompactCollector::ClearWeakCells(Object** non_live_map_list,
2708 DependentCode** dependent_code_list) {
2709 Heap* heap = this->heap();
2710 TRACE_GC(heap->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_CELLS);
2711 Object* weak_cell_obj = heap->encountered_weak_cells();
2712 Object* the_hole_value = heap->the_hole_value();
2713 DependentCode* dependent_code_head =
2714 DependentCode::cast(heap->empty_fixed_array());
2715 Object* non_live_map_head = Smi::FromInt(0);
2716 while (weak_cell_obj != Smi::FromInt(0)) {
2717 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2718 Object* next_weak_cell = weak_cell->next();
2719 bool clear_value = true;
2720 bool clear_next = true;
2721 // We do not insert cleared weak cells into the list, so the value
2722 // cannot be a Smi here.
2723 HeapObject* value = HeapObject::cast(weak_cell->value());
2724 if (!MarkCompactCollector::IsMarked(value)) {
2725 // Cells for new-space objects embedded in optimized code are wrapped in
2726 // WeakCell and put into Heap::weak_object_to_code_table.
2727 // Such cells do not have any strong references but we want to keep them
2728 // alive as long as the cell value is alive.
2729 // TODO(ulan): remove this once we remove Heap::weak_object_to_code_table.
2730 if (value->IsCell()) {
2731 Object* cell_value = Cell::cast(value)->value();
2732 if (cell_value->IsHeapObject() &&
2733 MarkCompactCollector::IsMarked(HeapObject::cast(cell_value))) {
2734 // Resurrect the cell.
2735 MarkBit mark = Marking::MarkBitFrom(value);
2736 SetMark(value, mark);
2737 Object** slot = HeapObject::RawField(value, Cell::kValueOffset);
2738 RecordSlot(value, slot, *slot);
2739 slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2740 RecordSlot(weak_cell, slot, *slot);
2741 clear_value = false;
2742 }
2743 }
2744 if (value->IsMap()) {
2745 // The map is non-live.
2746 Map* map = Map::cast(value);
2747 // Add dependent code to the dependent_code_list.
2748 DependentCode* candidate = map->dependent_code();
2749 // We rely on the fact that the weak code group comes first.
2750 STATIC_ASSERT(DependentCode::kWeakCodeGroup == 0);
2751 if (candidate->length() > 0 &&
2752 candidate->group() == DependentCode::kWeakCodeGroup) {
2753 candidate->set_next_link(dependent_code_head);
2754 dependent_code_head = candidate;
2755 }
2756 // Add the weak cell to the non_live_map list.
2757 weak_cell->set_next(non_live_map_head);
2758 non_live_map_head = weak_cell;
2759 clear_value = false;
2760 clear_next = false;
2761 }
2762 } else {
2763 // The value of the weak cell is alive.
2764 Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
2765 RecordSlot(weak_cell, slot, *slot);
2766 clear_value = false;
2767 }
2768 if (clear_value) {
2769 weak_cell->clear();
2770 }
2771 if (clear_next) {
2772 weak_cell->clear_next(the_hole_value);
2773 }
2774 weak_cell_obj = next_weak_cell;
2775 }
2776 heap->set_encountered_weak_cells(Smi::FromInt(0));
2777 *non_live_map_list = non_live_map_head;
2778 *dependent_code_list = dependent_code_head;
2779 }
2780
2781
2782 void MarkCompactCollector::AbortWeakCells() {
2783 Object* the_hole_value = heap()->the_hole_value();
2784 Object* weak_cell_obj = heap()->encountered_weak_cells();
2785 while (weak_cell_obj != Smi::FromInt(0)) {
2786 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2787 weak_cell_obj = weak_cell->next();
2788 weak_cell->clear_next(the_hole_value);
2789 }
2790 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2791 }
2792
2793
2794 void MarkCompactCollector::AbortTransitionArrays() {
2795 HeapObject* undefined = heap()->undefined_value();
2796 Object* obj = heap()->encountered_transition_arrays();
2797 while (obj != Smi::FromInt(0)) {
2798 TransitionArray* array = TransitionArray::cast(obj);
2799 obj = array->next_link();
2800 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2801 }
2802 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2803 }
2804
2805 static inline SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
2806 if (RelocInfo::IsCodeTarget(rmode)) {
2807 return CODE_TARGET_SLOT;
2808 } else if (RelocInfo::IsCell(rmode)) {
2809 return CELL_TARGET_SLOT;
2810 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
2811 return EMBEDDED_OBJECT_SLOT;
2812 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
2813 return DEBUG_TARGET_SLOT;
2814 }
2815 UNREACHABLE();
2816 return NUMBER_OF_SLOT_TYPES;
2817 }
2818
2819 void MarkCompactCollector::RecordRelocSlot(Code* host, RelocInfo* rinfo,
2820 Object* target) {
2821 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2822 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
2823 RelocInfo::Mode rmode = rinfo->rmode();
2824 if (target_page->IsEvacuationCandidate() &&
2825 (rinfo->host() == NULL ||
2826 !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
2827 Address addr = rinfo->pc();
2828 SlotType slot_type = SlotTypeForRMode(rmode);
2829 if (rinfo->IsInConstantPool()) {
2830 addr = rinfo->constant_pool_entry_address();
2831 if (RelocInfo::IsCodeTarget(rmode)) {
2832 slot_type = CODE_ENTRY_SLOT;
2833 } else {
2834 DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2835 slot_type = OBJECT_SLOT;
2836 }
2837 }
2838 RememberedSet<OLD_TO_OLD>::InsertTyped(
2839 source_page, reinterpret_cast<Address>(host), slot_type, addr);
2840 }
2841 }
2842
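// Updates a single slot. If the referenced object has already been evacuated,
// its first word is a forwarding address (written in MigrateObject() above);
// the slot is then redirected to the new location with a compare-and-swap so
// that concurrent updaters do not clobber each other's writes.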
2843 static inline SlotCallbackResult UpdateSlot(Object** slot) {
2844 Object* obj = reinterpret_cast<Object*>(
2845 base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
2846
2847 if (obj->IsHeapObject()) {
2848 HeapObject* heap_obj = HeapObject::cast(obj);
2849 MapWord map_word = heap_obj->map_word();
2850 if (map_word.IsForwardingAddress()) {
2851 DCHECK(heap_obj->GetHeap()->InFromSpace(heap_obj) ||
2852 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2853 Page::FromAddress(heap_obj->address())
2854 ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2855 HeapObject* target = map_word.ToForwardingAddress();
2856 base::NoBarrier_CompareAndSwap(
2857 reinterpret_cast<base::AtomicWord*>(slot),
2858 reinterpret_cast<base::AtomicWord>(obj),
2859 reinterpret_cast<base::AtomicWord>(target));
2860 DCHECK(!heap_obj->GetHeap()->InFromSpace(target) &&
2861 !MarkCompactCollector::IsOnEvacuationCandidate(target));
2862 }
2863 }
2864 return REMOVE_SLOT;
2865 }
2866
2867 // Visitor for updating pointers from live objects in old spaces to new space.
2868 // It does not expect to encounter pointers to dead objects.
2869 class PointersUpdatingVisitor : public ObjectVisitor {
2870 public:
2871 void VisitPointer(Object** p) override { UpdateSlot(p); }
2872
2873 void VisitPointers(Object** start, Object** end) override {
2874 for (Object** p = start; p < end; p++) UpdateSlot(p);
2875 }
2876
2877 void VisitCell(RelocInfo* rinfo) override {
2878 UpdateTypedSlotHelper::UpdateCell(rinfo, UpdateSlot);
2879 }
2880
2881 void VisitEmbeddedPointer(RelocInfo* rinfo) override {
2882 UpdateTypedSlotHelper::UpdateEmbeddedPointer(rinfo, UpdateSlot);
2883 }
2884
2885 void VisitCodeTarget(RelocInfo* rinfo) override {
2886 UpdateTypedSlotHelper::UpdateCodeTarget(rinfo, UpdateSlot);
2887 }
2888
2889 void VisitCodeEntry(Address entry_address) override {
2890 UpdateTypedSlotHelper::UpdateCodeEntry(entry_address, UpdateSlot);
2891 }
2892
2893 void VisitDebugTarget(RelocInfo* rinfo) override {
2894 UpdateTypedSlotHelper::UpdateDebugTarget(rinfo, UpdateSlot);
2895 }
2896 };
2897
2898 static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
2899 Object** p) {
2900 MapWord map_word = HeapObject::cast(*p)->map_word();
2901
2902 if (map_word.IsForwardingAddress()) {
2903 return String::cast(map_word.ToForwardingAddress());
2904 }
2905
2906 return String::cast(*p);
2907 }
2908
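// Checks whether a recorded slot lies inside a live (black) object by
// consulting the page's marking bitmap. Each bitmap cell covers
// Bitmap::kBitsPerCell consecutive words of the page, so the code first
// inspects the cell containing the slot and then walks backwards to the
// nearest preceding cell with a mark bit set.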
2909 bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
2910 Space* owner = p->owner();
2911 DCHECK(owner != heap_->lo_space() && owner != nullptr);
2912 USE(owner);
2913
2914 // If we are on a black page, we cannot find the actual object start
2915 // easily, so we conservatively treat the slot as being inside a live object.
2916 if (p->IsFlagSet(Page::BLACK_PAGE)) {
2917 return true;
2918 }
2919
2920 uint32_t mark_bit_index = p->AddressToMarkbitIndex(slot);
2921 unsigned int cell_index = mark_bit_index >> Bitmap::kBitsPerCellLog2;
2922 MarkBit::CellType index_mask = 1u << Bitmap::IndexInCell(mark_bit_index);
2923 MarkBit::CellType* cells = p->markbits()->cells();
2924 Address base_address = p->area_start();
2925 unsigned int base_address_cell_index = Bitmap::IndexToCell(
2926 Bitmap::CellAlignIndex(p->AddressToMarkbitIndex(base_address)));
2927
2928 // Check if the slot points to the start of an object. This can happen e.g.
2929 // when we left trim a fixed array. Such slots are invalid and we can remove
2930 // them.
2931 if (index_mask > 1) {
2932 if ((cells[cell_index] & index_mask) != 0 &&
2933 (cells[cell_index] & (index_mask >> 1)) == 0) {
2934 return false;
2935 }
2936 } else {
2937 // Left trimming moves the mark bits so we cannot be in the very first cell.
2938 DCHECK(cell_index != base_address_cell_index);
2939 if ((cells[cell_index] & index_mask) != 0 &&
2940 (cells[cell_index - 1] & (1u << Bitmap::kBitIndexMask)) == 0) {
2941 return false;
2942 }
2943 }
2944
2945 // Check if the object is in the current cell.
2946 MarkBit::CellType slot_mask;
2947 if ((cells[cell_index] == 0) ||
2948 (base::bits::CountTrailingZeros32(cells[cell_index]) >
2949 base::bits::CountTrailingZeros32(cells[cell_index] | index_mask))) {
2950 // If we are already in the first cell, there is no live object.
2951 if (cell_index == base_address_cell_index) return false;
2952
2953 // If not, walk backwards to a preceding cell that has a mark bit set.
2954 do {
2955 cell_index--;
2956 } while (cell_index > base_address_cell_index && cells[cell_index] == 0);
2957
2958 // The slot must be in a dead object if there are no preceding cells that
2959 // have mark bits set.
2960 if (cells[cell_index] == 0) {
2961 return false;
2962 }
2963
2964 // The object is in a preceding cell. Set the mask to find any object.
2965 slot_mask = ~0u;
2966 } else {
2967 // We are interested in object mark bits right before the slot.
2968 slot_mask = index_mask + (index_mask - 1);
2969 }
2970
2971 MarkBit::CellType current_cell = cells[cell_index];
2972 CHECK(current_cell != 0);
2973
2974 // Find the last live object in the cell.
2975 unsigned int leading_zeros =
2976 base::bits::CountLeadingZeros32(current_cell & slot_mask);
2977 CHECK(leading_zeros != Bitmap::kBitsPerCell);
2978 int offset = static_cast<int>(Bitmap::kBitIndexMask - leading_zeros) - 1;
2979
2980 base_address += (cell_index - base_address_cell_index) *
2981 Bitmap::kBitsPerCell * kPointerSize;
2982 Address address = base_address + offset * kPointerSize;
2983 HeapObject* object = HeapObject::FromAddress(address);
2984 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
2985 CHECK(object->address() < reinterpret_cast<Address>(slot));
2986 if ((object->address() + kPointerSize) <= slot &&
2987 (object->address() + object->Size()) > slot) {
2988 // If the slot is within the last found object in the cell, the slot is
2989 // in a live object.
2990 // Slots pointing to the first word of an object are invalid and removed.
2991 // This can happen when we move the object header while left trimming.
2992 return true;
2993 }
2994 return false;
2995 }
2996
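// Slow path: linearly iterates the objects of the page (or large object
// space) containing |slot| and returns the marked object covering it, or
// nullptr if the slot is not inside a live object.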
2997 HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
2998 Page* p = Page::FromAddress(slot);
2999 Space* owner = p->owner();
3000 if (owner == heap_->lo_space() || owner == nullptr) {
3001 Object* large_object = heap_->lo_space()->FindObject(slot);
3002 // This object has to exist, otherwise we would not have recorded a slot
3003 // for it.
3004 CHECK(large_object->IsHeapObject());
3005 HeapObject* large_heap_object = HeapObject::cast(large_object);
3006
3007 if (IsMarked(large_heap_object)) {
3008 return large_heap_object;
3009 }
3010 return nullptr;
3011 }
3012
3013 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3014 HeapObjectIterator it(p);
3015 HeapObject* object = nullptr;
3016 while ((object = it.Next()) != nullptr) {
3017 int size = object->Size();
3018 if (object->address() > slot) return nullptr;
3019 if (object->address() <= slot && slot < (object->address() + size)) {
3020 return object;
3021 }
3022 }
3023 } else {
3024 LiveObjectIterator<kBlackObjects> it(p);
3025 HeapObject* object = nullptr;
3026 while ((object = it.Next()) != nullptr) {
3027 int size = object->Size();
3028 if (object->address() > slot) return nullptr;
3029 if (object->address() <= slot && slot < (object->address() + size)) {
3030 return object;
3031 }
3032 }
3033 }
3034 return nullptr;
3035 }
3036
3037
3038 void MarkCompactCollector::EvacuateNewSpacePrologue() {
3039 NewSpace* new_space = heap()->new_space();
3040 // Append the list of new space pages to be processed.
3041 for (Page* p : NewSpacePageRange(new_space->bottom(), new_space->top())) {
3042 newspace_evacuation_candidates_.Add(p);
3043 }
3044 new_space->Flip();
3045 new_space->ResetAllocationInfo();
3046 }
3047
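// An Evacuator processes pages during parallel evacuation. It owns
// thread-local compaction spaces and pretenuring feedback that are merged
// back into the heap on the main thread via Finalize().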
3048 class MarkCompactCollector::Evacuator : public Malloced {
3049 public:
3050 enum EvacuationMode {
3051 kObjectsNewToOld,
3052 kPageNewToOld,
3053 kObjectsOldToOld,
3054 kPageNewToNew,
3055 };
3056
3057 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3058 // Note: The order of checks is important in this function.
3059 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3060 return kPageNewToOld;
3061 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3062 return kPageNewToNew;
3063 if (chunk->InNewSpace()) return kObjectsNewToOld;
3064 DCHECK(chunk->IsEvacuationCandidate());
3065 return kObjectsOldToOld;
3066 }
3067
3068 // NewSpacePages with more live bytes than this threshold qualify for fast
3069 // evacuation.
3070 static int PageEvacuationThreshold() {
3071 if (FLAG_page_promotion)
3072 return FLAG_page_promotion_threshold * Page::kAllocatableMemory / 100;
3073 return Page::kAllocatableMemory + kPointerSize;
3074 }
3075
3076 explicit Evacuator(MarkCompactCollector* collector)
3077 : collector_(collector),
3078 compaction_spaces_(collector->heap()),
3079 local_pretenuring_feedback_(base::HashMap::PointersMatch,
3080 kInitialLocalPretenuringFeedbackCapacity),
3081 new_space_visitor_(collector->heap(), &compaction_spaces_,
3082 &local_pretenuring_feedback_),
3083 new_space_page_visitor(collector->heap()),
3084 old_space_visitor_(collector->heap(), &compaction_spaces_),
3085 duration_(0.0),
3086 bytes_compacted_(0) {}
3087
3088 inline bool EvacuatePage(Page* chunk);
3089
3090 // Merge back locally cached info sequentially. Note that this method needs
3091 // to be called from the main thread.
3092 inline void Finalize();
3093
3094 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3095
3096 private:
3097 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3098
3099 inline Heap* heap() { return collector_->heap(); }
3100
3101 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3102 duration_ += duration;
3103 bytes_compacted_ += bytes_compacted;
3104 }
3105
3106 MarkCompactCollector* collector_;
3107
3108 // Locally cached collector data.
3109 CompactionSpaceCollection compaction_spaces_;
3110 base::HashMap local_pretenuring_feedback_;
3111
3112 // Visitors for the corresponding spaces.
3113 EvacuateNewSpaceVisitor new_space_visitor_;
3114 EvacuateNewSpacePageVisitor new_space_page_visitor;
3115 EvacuateOldSpaceVisitor old_space_visitor_;
3116
3117 // Bookkeeping info.
3118 double duration_;
3119 intptr_t bytes_compacted_;
3120 };
3121
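// Evacuates a single page according to its evacuation mode and reports the
// compaction progress. Returns false only for aborted old-to-old pages so
// that the caller adds them to the sweeper.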
3122 bool MarkCompactCollector::Evacuator::EvacuatePage(Page* page) {
3123 bool success = false;
3124 DCHECK(page->SweepingDone());
3125 int saved_live_bytes = page->LiveBytes();
3126 double evacuation_time = 0.0;
3127 Heap* heap = page->heap();
3128 {
3129 AlwaysAllocateScope always_allocate(heap->isolate());
3130 TimedScope timed_scope(&evacuation_time);
3131 switch (ComputeEvacuationMode(page)) {
3132 case kObjectsNewToOld:
3133 success = collector_->VisitLiveObjects(page, &new_space_visitor_,
3134 kClearMarkbits);
3135 ArrayBufferTracker::ProcessBuffers(
3136 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3137 DCHECK(success);
3138 break;
3139 case kPageNewToOld:
3140 success = collector_->VisitLiveObjects(page, &new_space_page_visitor,
3141 kKeepMarking);
3142 // ArrayBufferTracker will be updated during sweeping.
3143 DCHECK(success);
3144 break;
3145 case kPageNewToNew:
3146 new_space_page_visitor.account_semispace_copied(page->LiveBytes());
3147 // ArrayBufferTracker will be updated during sweeping.
3148 success = true;
3149 break;
3150 case kObjectsOldToOld:
3151 success = collector_->VisitLiveObjects(page, &old_space_visitor_,
3152 kClearMarkbits);
3153 if (!success) {
3154 // Aborted compaction page. We have to record slots here, since we
3155 // might not have recorded them in the first place.
3156 // Note: We mark the page as aborted here to be able to record slots
3157 // for code objects in |RecordMigratedSlotVisitor|.
3158 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3159 EvacuateRecordOnlyVisitor record_visitor(collector_->heap());
3160 success =
3161 collector_->VisitLiveObjects(page, &record_visitor, kKeepMarking);
3162 ArrayBufferTracker::ProcessBuffers(
3163 page, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3164 DCHECK(success);
3165 // We need to return failure here to indicate that we want this page
3166 // added to the sweeper.
3167 success = false;
3168 } else {
3169 ArrayBufferTracker::ProcessBuffers(
3170 page, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3171 }
3172 break;
3173 default:
3174 UNREACHABLE();
3175 }
3176 }
3177 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3178 if (FLAG_trace_evacuation) {
3179 PrintIsolate(heap->isolate(),
3180 "evacuation[%p]: page=%p new_space=%d "
3181 "page_evacuation=%d executable=%d contains_age_mark=%d "
3182 "live_bytes=%d time=%f\n",
3183 static_cast<void*>(this), static_cast<void*>(page),
3184 page->InNewSpace(),
3185 page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3186 page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3187 page->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3188 page->Contains(heap->new_space()->age_mark()),
3189 saved_live_bytes, evacuation_time);
3190 }
3191 return success;
3192 }
3193
3194 void MarkCompactCollector::Evacuator::Finalize() {
3195 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3196 heap()->code_space()->MergeCompactionSpace(
3197 compaction_spaces_.Get(CODE_SPACE));
3198 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3199 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3200 new_space_page_visitor.promoted_size());
3201 heap()->IncrementSemiSpaceCopiedObjectSize(
3202 new_space_visitor_.semispace_copied_size() +
3203 new_space_page_visitor.semispace_copied_size());
3204 heap()->IncrementYoungSurvivorsCounter(
3205 new_space_visitor_.promoted_size() +
3206 new_space_visitor_.semispace_copied_size() +
3207 new_space_page_visitor.promoted_size() +
3208 new_space_page_visitor.semispace_copied_size());
3209 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3210 }
3211
3212 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3213 intptr_t live_bytes) {
3214 if (!FLAG_parallel_compaction) return 1;
3215 // Compute the number of needed tasks based on a target compaction time, the
3216 // profiled compaction speed and marked live memory.
3217 //
3218 // The number of parallel compaction tasks is limited by:
3219 // - #evacuation pages
3220 // - (#cores - 1)
3221 const double kTargetCompactionTimeInMs = 1;
3222 const int kNumSweepingTasks = 3;
3223
3224 double compaction_speed =
3225 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3226
3227 const int available_cores = Max(
3228 1, static_cast<int>(
3229 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) -
3230 kNumSweepingTasks - 1);
3231 int tasks;
3232 if (compaction_speed > 0) {
3233 tasks = 1 + static_cast<int>(live_bytes / compaction_speed /
3234 kTargetCompactionTimeInMs);
3235 } else {
3236 tasks = pages;
3237 }
3238 const int tasks_capped_pages = Min(pages, tasks);
3239 return Min(available_cores, tasks_capped_pages);
3240 }
3241
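// Job traits for parallel evacuation. Each task uses one Evacuator; the
// per-page data points to a counter of aborted compaction pages that is
// incremented during sequential finalization.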
3242 class EvacuationJobTraits {
3243 public:
3244 typedef int* PerPageData; // Pointer to number of aborted pages.
3245 typedef MarkCompactCollector::Evacuator* PerTaskData;
3246
3247 static const bool NeedSequentialFinalization = true;
3248
3249 static bool ProcessPageInParallel(Heap* heap, PerTaskData evacuator,
3250 MemoryChunk* chunk, PerPageData) {
3251 return evacuator->EvacuatePage(reinterpret_cast<Page*>(chunk));
3252 }
3253
3254 static void FinalizePageSequentially(Heap* heap, MemoryChunk* chunk,
3255 bool success, PerPageData data) {
3256 using Evacuator = MarkCompactCollector::Evacuator;
3257 Page* p = static_cast<Page*>(chunk);
3258 switch (Evacuator::ComputeEvacuationMode(p)) {
3259 case Evacuator::kPageNewToOld:
3260 break;
3261 case Evacuator::kPageNewToNew:
3262 DCHECK(success);
3263 break;
3264 case Evacuator::kObjectsNewToOld:
3265 DCHECK(success);
3266 break;
3267 case Evacuator::kObjectsOldToOld:
3268 if (success) {
3269 DCHECK(p->IsEvacuationCandidate());
3270 DCHECK(p->SweepingDone());
3271 p->Unlink();
3272 } else {
3273 // We have partially compacted the page, i.e., some objects may have
3274 // moved, others are still in place.
3275 p->ClearEvacuationCandidate();
3276 // Slots have already been recorded, so we just need to add the page
3277 // to the sweeper, which will happen after updating pointers.
3278 *data += 1;
3279 }
3280 break;
3281 default:
3282 UNREACHABLE();
3283 }
3284 }
3285 };
3286
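// Collects old space evacuation candidates and new space pages, decides
// which new space pages are promoted as a whole (based on live bytes and the
// age mark), and runs the evacuation job with a task count derived from the
// amount of live memory.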
3287 void MarkCompactCollector::EvacuatePagesInParallel() {
3288 PageParallelJob<EvacuationJobTraits> job(
3289 heap_, heap_->isolate()->cancelable_task_manager(),
3290 &page_parallel_job_semaphore_);
3291
3292 int abandoned_pages = 0;
3293 intptr_t live_bytes = 0;
3294 for (Page* page : evacuation_candidates_) {
3295 live_bytes += page->LiveBytes();
3296 job.AddPage(page, &abandoned_pages);
3297 }
3298
3299 const Address age_mark = heap()->new_space()->age_mark();
3300 for (Page* page : newspace_evacuation_candidates_) {
3301 live_bytes += page->LiveBytes();
3302 if (!page->NeverEvacuate() &&
3303 (page->LiveBytes() > Evacuator::PageEvacuationThreshold()) &&
3304 !page->Contains(age_mark)) {
3305 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
3306 EvacuateNewSpacePageVisitor::MoveToOldSpace(page, heap()->old_space());
3307 } else {
3308 EvacuateNewSpacePageVisitor::MoveToToSpace(page);
3309 }
3310 }
3311
3312 job.AddPage(page, &abandoned_pages);
3313 }
3314 DCHECK_GE(job.NumberOfPages(), 1);
3315
3316 // Used for trace summary.
3317 double compaction_speed = 0;
3318 if (FLAG_trace_evacuation) {
3319 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3320 }
3321
3322 const int wanted_num_tasks =
3323 NumberOfParallelCompactionTasks(job.NumberOfPages(), live_bytes);
3324 Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
3325 for (int i = 0; i < wanted_num_tasks; i++) {
3326 evacuators[i] = new Evacuator(this);
3327 }
3328 job.Run(wanted_num_tasks, [evacuators](int i) { return evacuators[i]; });
3329 for (int i = 0; i < wanted_num_tasks; i++) {
3330 evacuators[i]->Finalize();
3331 delete evacuators[i];
3332 }
3333 delete[] evacuators;
3334
3335 if (FLAG_trace_evacuation) {
3336 PrintIsolate(isolate(),
3337 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
3338 "aborted=%d wanted_tasks=%d tasks=%d cores=%" PRIuS
3339 " live_bytes=%" V8PRIdPTR " compaction_speed=%.f\n",
3340 isolate()->time_millis_since_init(),
3341 FLAG_parallel_compaction ? "yes" : "no", job.NumberOfPages(),
3342 abandoned_pages, wanted_num_tasks, job.NumberOfTasks(),
3343 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads(),
3344 live_bytes, compaction_speed);
3345 }
3346 }
3347
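// Weak object retainer that follows forwarding pointers of evacuated objects
// so that weak lists point to the new locations.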
3348 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3349 public:
3350 virtual Object* RetainAs(Object* object) {
3351 if (object->IsHeapObject()) {
3352 HeapObject* heap_object = HeapObject::cast(object);
3353 MapWord map_word = heap_object->map_word();
3354 if (map_word.IsForwardingAddress()) {
3355 return map_word.ToForwardingAddress();
3356 }
3357 }
3358 return object;
3359 }
3360 };
3361
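// Sweeps a single page: free ranges between live objects are either added to
// the free list or turned into filler objects, free space is optionally
// zapped, live objects are optionally visited, and the skip list is rebuilt
// for code pages. Returns the size of the largest freed block that is
// guaranteed to be allocatable (0 when the free list is ignored).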
3362 template <MarkCompactCollector::Sweeper::SweepingMode sweeping_mode,
3363 MarkCompactCollector::Sweeper::SweepingParallelism parallelism,
3364 MarkCompactCollector::Sweeper::SkipListRebuildingMode skip_list_mode,
3365 MarkCompactCollector::Sweeper::FreeListRebuildingMode free_list_mode,
3366 MarkCompactCollector::Sweeper::FreeSpaceTreatmentMode free_space_mode>
3367 int MarkCompactCollector::Sweeper::RawSweep(PagedSpace* space, Page* p,
3368 ObjectVisitor* v) {
3369 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3370 DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
3371 DCHECK((space == nullptr) || (space->identity() != CODE_SPACE) ||
3372 (skip_list_mode == REBUILD_SKIP_LIST));
3373 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3374 DCHECK(parallelism == SWEEP_ON_MAIN_THREAD || sweeping_mode == SWEEP_ONLY);
3375
3376 // Before we sweep objects on the page, we free dead array buffers which
3377 // requires valid mark bits.
3378 ArrayBufferTracker::FreeDead(p);
3379
3380 Address free_start = p->area_start();
3381 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3382
3383 // If we use the skip list for code space pages, we have to lock the skip
3384 // list because it could be accessed concurrently by the runtime or the
3385 // deoptimizer.
3386 SkipList* skip_list = p->skip_list();
3387 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
3388 skip_list->Clear();
3389 }
3390
3391 intptr_t freed_bytes = 0;
3392 intptr_t max_freed_bytes = 0;
3393 int curr_region = -1;
3394
3395 LiveObjectIterator<kBlackObjects> it(p);
3396 HeapObject* object = NULL;
3397 while ((object = it.Next()) != NULL) {
3398 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3399 Address free_end = object->address();
3400 if (free_end != free_start) {
3401 int size = static_cast<int>(free_end - free_start);
3402 if (free_space_mode == ZAP_FREE_SPACE) {
3403 memset(free_start, 0xcc, size);
3404 }
3405 if (free_list_mode == REBUILD_FREE_LIST) {
3406 freed_bytes = space->UnaccountedFree(free_start, size);
3407 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3408 } else {
3409 p->heap()->CreateFillerObjectAt(free_start, size,
3410 ClearRecordedSlots::kNo);
3411 }
3412 }
3413 Map* map = object->synchronized_map();
3414 int size = object->SizeFromMap(map);
3415 if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
3416 object->IterateBody(map->instance_type(), size, v);
3417 }
3418 if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
3419 int new_region_start = SkipList::RegionNumber(free_end);
3420 int new_region_end =
3421 SkipList::RegionNumber(free_end + size - kPointerSize);
3422 if (new_region_start != curr_region || new_region_end != curr_region) {
3423 skip_list->AddObject(free_end, size);
3424 curr_region = new_region_end;
3425 }
3426 }
3427 free_start = free_end + size;
3428 }
3429
3430 // Clear the mark bits of that page and reset live bytes count.
3431 Bitmap::Clear(p);
3432
3433 if (free_start != p->area_end()) {
3434 int size = static_cast<int>(p->area_end() - free_start);
3435 if (free_space_mode == ZAP_FREE_SPACE) {
3436 memset(free_start, 0xcc, size);
3437 }
3438 if (free_list_mode == REBUILD_FREE_LIST) {
3439 freed_bytes = space->UnaccountedFree(free_start, size);
3440 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3441 } else {
3442 p->heap()->CreateFillerObjectAt(free_start, size,
3443 ClearRecordedSlots::kNo);
3444 }
3445 }
3446 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3447 if (free_list_mode == IGNORE_FREE_LIST) return 0;
3448 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3449 }
3450
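// Drops the slots recorded for a code object that is being invalidated so
// that no stale slots inside its body are updated later.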
3451 void MarkCompactCollector::InvalidateCode(Code* code) {
3452 if (heap_->incremental_marking()->IsCompacting() &&
3453 !ShouldSkipEvacuationSlotRecording(code)) {
3454 DCHECK(compacting_);
3455
3456 // If the object is white then no slots were recorded on it yet.
3457 MarkBit mark_bit = Marking::MarkBitFrom(code);
3458 if (Marking::IsWhite(mark_bit)) return;
3459
3460 // Ignore all slots that might have been recorded in the body of the
3461 // deoptimized code object. Assumption: no slots will be recorded for
3462 // this object after invalidating it.
3463 Page* page = Page::FromAddress(code->address());
3464 Address start = code->instruction_start();
3465 Address end = code->address() + code->Size();
3466 RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(page, start, end);
3467 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, start, end);
3468 }
3469 }
3470
3471
3472 // Return true if the given code is deoptimized or will be deoptimized.
3473 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3474 return code->is_optimized_code() && code->marked_for_deoptimization();
3475 }
3476
3477
3478 #ifdef VERIFY_HEAP
3479 static void VerifyAllBlackObjects(MemoryChunk* page) {
3480 LiveObjectIterator<kAllLiveObjects> it(page);
3481 HeapObject* object = NULL;
3482 while ((object = it.Next()) != NULL) {
3483 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3484 }
3485 }
3486 #endif // VERIFY_HEAP
3487
3488 template <class Visitor>
3489 bool MarkCompactCollector::VisitLiveObjects(MemoryChunk* page, Visitor* visitor,
3490 IterationMode mode) {
3491 #ifdef VERIFY_HEAP
3492 VerifyAllBlackObjects(page);
3493 #endif // VERIFY_HEAP
3494
3495 LiveObjectIterator<kBlackObjects> it(page);
3496 HeapObject* object = nullptr;
3497 while ((object = it.Next()) != nullptr) {
3498 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3499 if (!visitor->Visit(object)) {
3500 if (mode == kClearMarkbits) {
3501 page->markbits()->ClearRange(
3502 page->AddressToMarkbitIndex(page->area_start()),
3503 page->AddressToMarkbitIndex(object->address()));
3504 if (page->old_to_new_slots() != nullptr) {
3505 page->old_to_new_slots()->RemoveRange(
3506 0, static_cast<int>(object->address() - page->address()));
3507 }
3508 RecomputeLiveBytes(page);
3509 }
3510 return false;
3511 }
3512 }
3513 if (mode == kClearMarkbits) {
3514 Bitmap::Clear(page);
3515 }
3516 return true;
3517 }
3518
3519
3520 void MarkCompactCollector::RecomputeLiveBytes(MemoryChunk* page) {
3521 LiveObjectIterator<kBlackObjects> it(page);
3522 int new_live_size = 0;
3523 HeapObject* object = nullptr;
3524 while ((object = it.Next()) != nullptr) {
3525 new_live_size += object->Size();
3526 }
3527 page->SetLiveBytes(new_live_size);
3528 }
3529
3530
3531 void MarkCompactCollector::VisitLiveObjectsBody(Page* page,
3532 ObjectVisitor* visitor) {
3533 #ifdef VERIFY_HEAP
3534 VerifyAllBlackObjects(page);
3535 #endif // VERIFY_HEAP
3536
3537 LiveObjectIterator<kBlackObjects> it(page);
3538 HeapObject* object = NULL;
3539 while ((object = it.Next()) != NULL) {
3540 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3541 Map* map = object->synchronized_map();
3542 int size = object->SizeFromMap(map);
3543 object->IterateBody(map->instance_type(), size, visitor);
3544 }
3545 }
3546
3547 void MarkCompactCollector::Sweeper::AddSweptPageSafe(PagedSpace* space,
3548 Page* page) {
3549 base::LockGuard<base::Mutex> guard(&mutex_);
3550 swept_list_[space->identity()].Add(page);
3551 }
3552
3553 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3554 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3555 Heap::RelocationLock relocation_lock(heap());
3556
3557 {
3558 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
3559 EvacuationScope evacuation_scope(this);
3560
3561 EvacuateNewSpacePrologue();
3562 EvacuatePagesInParallel();
3563 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3564 }
3565
3566 UpdatePointersAfterEvacuation();
3567
3568 if (!heap()->new_space()->Rebalance()) {
3569 FatalProcessOutOfMemory("NewSpace::Rebalance");
3570 }
3571
3572 // Give pages that are queued to be freed back to the OS. Note that filtering
3573 // slots only handles old space (for unboxed doubles), and thus map space can
3574 // still contain stale pointers. We only free the chunks after pointer updates
3575 // to still have access to page headers.
3576 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3577
3578 {
3579 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3580
3581 for (Page* p : newspace_evacuation_candidates_) {
3582 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3583 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
3584 sweeper().AddLatePage(p->owner()->identity(), p);
3585 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
3586 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
3587 p->ForAllFreeListCategories(
3588 [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
3589 sweeper().AddLatePage(p->owner()->identity(), p);
3590 }
3591 }
3592 newspace_evacuation_candidates_.Rewind(0);
3593
3594 for (Page* p : evacuation_candidates_) {
3595 // Important: the skip list should be cleared only after roots were
3596 // updated, because root iteration traverses the stack and might have to
3597 // find code objects via a non-updated pc pointing into an evacuation candidate.
3598 SkipList* list = p->skip_list();
3599 if (list != NULL) list->Clear();
3600 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3601 sweeper().AddLatePage(p->owner()->identity(), p);
3602 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
3603 }
3604 }
3605
3606 // Deallocate evacuated candidate pages.
3607 ReleaseEvacuationCandidates();
3608 }
3609
3610 #ifdef VERIFY_HEAP
3611 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) {
3612 VerifyEvacuation(heap());
3613 }
3614 #endif
3615 }
3616
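// Job traits for updating the untyped and typed slots of a remembered set
// (old-to-new or old-to-old) in parallel after evacuation.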
3617 template <PointerDirection direction>
3618 class PointerUpdateJobTraits {
3619 public:
3620 typedef int PerPageData; // Per page data is not used in this job.
3621 typedef int PerTaskData; // Per task data is not used in this job.
3622
3623 static bool ProcessPageInParallel(Heap* heap, PerTaskData, MemoryChunk* chunk,
3624 PerPageData) {
3625 UpdateUntypedPointers(heap, chunk);
3626 UpdateTypedPointers(heap, chunk);
3627 return true;
3628 }
3629 static const bool NeedSequentialFinalization = false;
3630 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3631 }
3632
3633 private:
3634 static void UpdateUntypedPointers(Heap* heap, MemoryChunk* chunk) {
3635 if (direction == OLD_TO_NEW) {
3636 RememberedSet<OLD_TO_NEW>::Iterate(chunk, [heap, chunk](Address slot) {
3637 return CheckAndUpdateOldToNewSlot(heap, slot);
3638 });
3639 } else {
3640 RememberedSet<OLD_TO_OLD>::Iterate(chunk, [](Address slot) {
3641 return UpdateSlot(reinterpret_cast<Object**>(slot));
3642 });
3643 }
3644 }
3645
3646 static void UpdateTypedPointers(Heap* heap, MemoryChunk* chunk) {
3647 if (direction == OLD_TO_OLD) {
3648 Isolate* isolate = heap->isolate();
3649 RememberedSet<OLD_TO_OLD>::IterateTyped(
3650 chunk, [isolate](SlotType type, Address host_addr, Address slot) {
3651 return UpdateTypedSlotHelper::UpdateTypedSlot(isolate, type, slot,
3652 UpdateSlot);
3653 });
3654 } else {
3655 Isolate* isolate = heap->isolate();
3656 RememberedSet<OLD_TO_NEW>::IterateTyped(
3657 chunk,
3658 [isolate, heap](SlotType type, Address host_addr, Address slot) {
3659 return UpdateTypedSlotHelper::UpdateTypedSlot(
3660 isolate, type, slot, [heap](Object** slot) {
3661 return CheckAndUpdateOldToNewSlot(
3662 heap, reinterpret_cast<Address>(slot));
3663 });
3664 });
3665 }
3666 }
3667
3668 static SlotCallbackResult CheckAndUpdateOldToNewSlot(Heap* heap,
3669 Address slot_address) {
3670 Object** slot = reinterpret_cast<Object**>(slot_address);
3671 if (heap->InFromSpace(*slot)) {
3672 HeapObject* heap_object = reinterpret_cast<HeapObject*>(*slot);
3673 DCHECK(heap_object->IsHeapObject());
3674 MapWord map_word = heap_object->map_word();
3675 // There could still be stale pointers in large object space, map space,
3676 // and old space for pages that have been promoted.
3677 if (map_word.IsForwardingAddress()) {
3678 // Update the corresponding slot.
3679 *slot = map_word.ToForwardingAddress();
3680 }
3681 // If the object was in from space before and is in to space after
3682 // executing the callback, the object is still live.
3683 // Unfortunately, we do not know whether the slot itself is still valid;
3684 // it could be inside a just-freed free-space object.
3685 if (heap->InToSpace(*slot)) {
3686 return KEEP_SLOT;
3687 }
3688 } else if (heap->InToSpace(*slot)) {
3689 DCHECK(Page::FromAddress(reinterpret_cast<HeapObject*>(*slot)->address())
3690 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
3691 // Slots can be in "to" space after a page has been moved. Since there is
3692 // no forwarding information present we need to check the markbits to
3693 // determine liveness.
3694 if (Marking::IsBlack(
3695 Marking::MarkBitFrom(reinterpret_cast<HeapObject*>(*slot))))
3696 return KEEP_SLOT;
3697 } else {
3698 DCHECK(!heap->InNewSpace(*slot));
3699 }
3700 return REMOVE_SLOT;
3701 }
3702 };
3703
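// Heuristic: roughly one task per kPagesPerTask pages, capped at kMaxTasks.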
3704 int NumberOfPointerUpdateTasks(int pages) {
3705 if (!FLAG_parallel_pointer_update) return 1;
3706 const int kMaxTasks = 4;
3707 const int kPagesPerTask = 4;
3708 return Min(kMaxTasks, (pages + kPagesPerTask - 1) / kPagesPerTask);
3709 }
3710
3711 template <PointerDirection direction>
3712 void UpdatePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3713 PageParallelJob<PointerUpdateJobTraits<direction> > job(
3714 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3715 RememberedSet<direction>::IterateMemoryChunks(
3716 heap, [&job](MemoryChunk* chunk) { job.AddPage(chunk, 0); });
3717 int num_pages = job.NumberOfPages();
3718 int num_tasks = NumberOfPointerUpdateTasks(num_pages);
3719 job.Run(num_tasks, [](int i) { return 0; });
3720 }
3721
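// Job traits for updating pointers inside to-space pages. The per-page data
// holds the [start, end) address range to iterate; pages promoted within new
// space still contain garbage and are therefore iterated via mark bits.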
3722 class ToSpacePointerUpdateJobTraits {
3723 public:
3724 typedef std::pair<Address, Address> PerPageData;
3725 typedef PointersUpdatingVisitor* PerTaskData;
3726
3727 static bool ProcessPageInParallel(Heap* heap, PerTaskData visitor,
3728 MemoryChunk* chunk, PerPageData limits) {
3729 if (chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3730 // New->new promoted pages contain garbage so they require iteration
3731 // using markbits.
3732 ProcessPageInParallelVisitLive(heap, visitor, chunk, limits);
3733 } else {
3734 ProcessPageInParallelVisitAll(heap, visitor, chunk, limits);
3735 }
3736 return true;
3737 }
3738
3739 static const bool NeedSequentialFinalization = false;
3740 static void FinalizePageSequentially(Heap*, MemoryChunk*, bool, PerPageData) {
3741 }
3742
3743 private:
3744 static void ProcessPageInParallelVisitAll(Heap* heap, PerTaskData visitor,
3745 MemoryChunk* chunk,
3746 PerPageData limits) {
3747 for (Address cur = limits.first; cur < limits.second;) {
3748 HeapObject* object = HeapObject::FromAddress(cur);
3749 Map* map = object->map();
3750 int size = object->SizeFromMap(map);
3751 object->IterateBody(map->instance_type(), size, visitor);
3752 cur += size;
3753 }
3754 }
3755
3756 static void ProcessPageInParallelVisitLive(Heap* heap, PerTaskData visitor,
3757 MemoryChunk* chunk,
3758 PerPageData limits) {
3759 LiveObjectIterator<kBlackObjects> it(chunk);
3760 HeapObject* object = NULL;
3761 while ((object = it.Next()) != NULL) {
3762 Map* map = object->map();
3763 int size = object->SizeFromMap(map);
3764 object->IterateBody(map->instance_type(), size, visitor);
3765 }
3766 }
3767 };
3768
3769 void UpdateToSpacePointersInParallel(Heap* heap, base::Semaphore* semaphore) {
3770 PageParallelJob<ToSpacePointerUpdateJobTraits> job(
3771 heap, heap->isolate()->cancelable_task_manager(), semaphore);
3772 Address space_start = heap->new_space()->bottom();
3773 Address space_end = heap->new_space()->top();
3774 for (Page* page : NewSpacePageRange(space_start, space_end)) {
3775 Address start =
3776 page->Contains(space_start) ? space_start : page->area_start();
3777 Address end = page->Contains(space_end) ? space_end : page->area_end();
3778 job.AddPage(page, std::make_pair(start, end));
3779 }
3780 PointersUpdatingVisitor visitor;
3781 int num_tasks = FLAG_parallel_pointer_update ? job.NumberOfPages() : 1;
3782 job.Run(num_tasks, [&visitor](int i) { return &visitor; });
3783 }
3784
3785 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3786 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3787
3788 PointersUpdatingVisitor updating_visitor;
3789
3790 {
3791 TRACE_GC(heap()->tracer(),
3792 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW);
3793 UpdateToSpacePointersInParallel(heap_, &page_parallel_job_semaphore_);
3794 // Update roots.
3795 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3796 UpdatePointersInParallel<OLD_TO_NEW>(heap_, &page_parallel_job_semaphore_);
3797 }
3798
3799 {
3800 Heap* heap = this->heap();
3801 TRACE_GC(heap->tracer(),
3802 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_EVACUATED);
3803 UpdatePointersInParallel<OLD_TO_OLD>(heap_, &page_parallel_job_semaphore_);
3804 }
3805
3806 {
3807 TRACE_GC(heap()->tracer(),
3808 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3809 // Update pointers from external string table.
3810 heap_->UpdateReferencesInExternalStringTable(
3811 &UpdateReferenceInExternalStringTableEntry);
3812
3813 EvacuationWeakObjectRetainer evacuation_object_retainer;
3814 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3815 }
3816 }
3817
3818
3819 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3820 for (Page* p : evacuation_candidates_) {
3821 if (!p->IsEvacuationCandidate()) continue;
3822 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3823 p->ResetLiveBytes();
3824 CHECK(p->SweepingDone());
3825 space->ReleasePage(p);
3826 }
3827 evacuation_candidates_.Rewind(0);
3828 compacting_ = false;
3829 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3830 }
3831
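// Sweeps pages of |identity| until a single free block of at least
// |required_freed_bytes| has been found or |max_pages| pages have been swept
// (zero disables the respective limit). Returns the largest free block found.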
3832 int MarkCompactCollector::Sweeper::ParallelSweepSpace(AllocationSpace identity,
3833 int required_freed_bytes,
3834 int max_pages) {
3835 int max_freed = 0;
3836 int pages_freed = 0;
3837 Page* page = nullptr;
3838 while ((page = GetSweepingPageSafe(identity)) != nullptr) {
3839 int freed = ParallelSweepPage(page, identity);
3840 pages_freed += 1;
3841 DCHECK_GE(freed, 0);
3842 max_freed = Max(max_freed, freed);
3843 if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
3844 return max_freed;
3845 if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
3846 }
3847 return max_freed;
3848 }
3849
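// Sweeps |page| if it is still pending, selecting the RawSweep configuration
// based on the owning space, and moves the page to the swept list. Returns
// the largest free block that became allocatable.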
3850 int MarkCompactCollector::Sweeper::ParallelSweepPage(Page* page,
3851 AllocationSpace identity) {
3852 int max_freed = 0;
3853 if (page->mutex()->TryLock()) {
3854 // If this page was already swept in the meantime, we can return here.
3855 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
3856 page->mutex()->Unlock();
3857 return 0;
3858 }
3859 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3860 if (identity == NEW_SPACE) {
3861 RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3862 IGNORE_FREE_LIST, IGNORE_FREE_SPACE>(nullptr, page, nullptr);
3863 } else if (identity == OLD_SPACE) {
3864 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3865 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3866 heap_->paged_space(identity), page, nullptr);
3867 } else if (identity == CODE_SPACE) {
3868 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3869 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3870 heap_->paged_space(identity), page, nullptr);
3871 } else {
3872 max_freed = RawSweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3873 REBUILD_FREE_LIST, IGNORE_FREE_SPACE>(
3874 heap_->paged_space(identity), page, nullptr);
3875 }
3876 {
3877 base::LockGuard<base::Mutex> guard(&mutex_);
3878 swept_list_[identity].Add(page);
3879 }
3880 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3881 page->mutex()->Unlock();
3882 }
3883 return max_freed;
3884 }
3885
3886 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) {
3887 DCHECK(!sweeping_in_progress_);
3888 PrepareToBeSweptPage(space, page);
3889 sweeping_list_[space].push_back(page);
3890 }
3891
3892 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space,
3893 Page* page) {
3894 DCHECK(sweeping_in_progress_);
3895 PrepareToBeSweptPage(space, page);
3896 late_pages_ = true;
3897 AddSweepingPageSafe(space, page);
3898 }
3899
3900 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space,
3901 Page* page) {
3902 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3903 int to_sweep = page->area_size() - page->LiveBytes();
3904 if (space != NEW_SPACE)
3905 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep);
3906 }
3907
3908 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe(
3909 AllocationSpace space) {
3910 base::LockGuard<base::Mutex> guard(&mutex_);
3911 Page* page = nullptr;
3912 if (!sweeping_list_[space].empty()) {
3913 page = sweeping_list_[space].front();
3914 sweeping_list_[space].pop_front();
3915 }
3916 return page;
3917 }
3918
3919 void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
3920 Page* page) {
3921 base::LockGuard<base::Mutex> guard(&mutex_);
3922 sweeping_list_[space].push_back(page);
3923 }
3924
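// Prepares the pages of |space| for sweeping: evacuation candidates are
// skipped, black pages only have their free tail released, one empty page is
// kept for reuse while further empty pages are released immediately, and all
// remaining pages are handed to the sweeper.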
3925 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3926 Address space_top = space->top();
3927 space->ClearStats();
3928
3929 int will_be_swept = 0;
3930 bool unused_page_present = false;
3931
3932 // Loop needs to support deletion if live bytes == 0 for a page.
3933 for (auto it = space->begin(); it != space->end();) {
3934 Page* p = *(it++);
3935 DCHECK(p->SweepingDone());
3936
3937 if (p->IsEvacuationCandidate()) {
3938 // Will be processed in EvacuateNewSpaceAndCandidates.
3939 DCHECK(evacuation_candidates_.length() > 0);
3940 continue;
3941 }
3942
3943 // We cannot sweep black pages, since all mark bits are set for these
3944 // pages.
3945 if (p->IsFlagSet(Page::BLACK_PAGE)) {
3946 Bitmap::Clear(p);
3947 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3948 p->ClearFlag(Page::BLACK_PAGE);
3949 // Area above the high watermark is free.
3950 Address free_start = p->HighWaterMark();
3951 // Check if the space top was in this page, which means that the
3952 // high watermark is not up-to-date.
3953 if (free_start < space_top && space_top <= p->area_end()) {
3954 free_start = space_top;
3955 }
3956 int size = static_cast<int>(p->area_end() - free_start);
3957 space->Free(free_start, size);
3958 continue;
3959 }
3960
3961 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3962 // We need to sweep the page to get it into an iterable state again. Note
3963 // that this adds unusable memory to the free list which is later dropped
3964 // again. Since we only use the flag for testing, this is fine.
3966 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3967 Sweeper::RawSweep<Sweeper::SWEEP_ONLY, Sweeper::SWEEP_ON_MAIN_THREAD,
3968 Sweeper::IGNORE_SKIP_LIST, Sweeper::IGNORE_FREE_LIST,
3969 Sweeper::IGNORE_FREE_SPACE>(space, p, nullptr);
3970 continue;
3971 }
3972
3973 // One unused page is kept; any further unused pages are released
3974 // immediately instead of being swept.
3974 if (p->LiveBytes() == 0) {
3975 if (unused_page_present) {
3976 if (FLAG_gc_verbose) {
3977 PrintIsolate(isolate(), "sweeping: released page: %p",
3978 static_cast<void*>(p));
3979 }
3980 ArrayBufferTracker::FreeAll(p);
3981 space->ReleasePage(p);
3982 continue;
3983 }
3984 unused_page_present = true;
3985 }
3986
3987 sweeper().AddPage(space->identity(), p);
3988 will_be_swept++;
3989 }
3990
3991 if (FLAG_gc_verbose) {
3992 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3993 AllocationSpaceName(space->identity()), will_be_swept);
3994 }
3995 }
3996
3997
3998 void MarkCompactCollector::SweepSpaces() {
3999 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4000 double start_time = 0.0;
4001 if (FLAG_print_cumulative_gc_stat) {
4002 start_time = heap_->MonotonicallyIncreasingTimeInMs();
4003 }
4004
4005 #ifdef DEBUG
4006 state_ = SWEEP_SPACES;
4007 #endif
4008
4009 {
4010 {
4011 GCTracer::Scope sweep_scope(heap()->tracer(),
4012 GCTracer::Scope::MC_SWEEP_OLD);
4013 StartSweepSpace(heap()->old_space());
4014 }
4015 {
4016 GCTracer::Scope sweep_scope(heap()->tracer(),
4017 GCTracer::Scope::MC_SWEEP_CODE);
4018 StartSweepSpace(heap()->code_space());
4019 }
4020 {
4021 GCTracer::Scope sweep_scope(heap()->tracer(),
4022 GCTracer::Scope::MC_SWEEP_MAP);
4023 StartSweepSpace(heap()->map_space());
4024 }
4025 sweeper().StartSweeping();
4026 }
4027
4028 // Deallocate unmarked large objects.
4029 heap_->lo_space()->FreeUnmarkedObjects();
4030
4031 if (FLAG_print_cumulative_gc_stat) {
4032 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
4033 start_time);
4034 }
4035 }
4036
4037 Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
4038
4039
4040 void MarkCompactCollector::Initialize() {
4041 MarkCompactMarkingVisitor::Initialize();
4042 IncrementalMarking::Initialize();
4043 }
4044
4045 void MarkCompactCollector::RecordCodeEntrySlot(HeapObject* host, Address slot,
4046 Code* target) {
4047 Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
4048 Page* source_page = Page::FromAddress(reinterpret_cast<Address>(host));
4049 if (target_page->IsEvacuationCandidate() &&
4050 !ShouldSkipEvacuationSlotRecording(host)) {
4051 // TODO(ulan): remove this check after investigating crbug.com/414964.
4052 CHECK(target->IsCode());
4053 RememberedSet<OLD_TO_OLD>::InsertTyped(
4054 source_page, reinterpret_cast<Address>(host), CODE_ENTRY_SLOT, slot);
4055 }
4056 }
4057
4058
4059 void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
4060 DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
4061 if (is_compacting()) {
4062 Code* host =
4063 isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
4064 pc);
4065 MarkBit mark_bit = Marking::MarkBitFrom(host);
4066 if (Marking::IsBlack(mark_bit)) {
4067 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4068 RecordRelocSlot(host, &rinfo, target);
4069 }
4070 }
4071 }
4072
4073 } // namespace internal
4074 } // namespace v8
4075