// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/mark-compact.h"

#include <unordered_map>
#include <unordered_set>

#include "src/base/logging.h"
#include "src/base/optional.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/compilation-cache.h"
#include "src/common/globals.h"
#include "src/deoptimizer/deoptimizer.h"
#include "src/execution/execution.h"
#include "src/execution/frames-inl.h"
#include "src/execution/isolate-utils-inl.h"
#include "src/execution/isolate-utils.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/array-buffer-sweeper.h"
#include "src/heap/basic-memory-chunk.h"
#include "src/heap/code-object-registry.h"
#include "src/heap/concurrent-allocator.h"
#include "src/heap/evacuation-allocator-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/index-generator.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/large-spaces.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk-layout.h"
#include "src/heap/memory-measurement-inl.h"
#include "src/heap/memory-measurement.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/parallel-work-item.h"
#include "src/heap/read-only-heap.h"
#include "src/heap/read-only-spaces.h"
#include "src/heap/safepoint.h"
#include "src/heap/slot-set.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/weak-object-worklists.h"
#include "src/ic/stub-cache.h"
#include "src/init/v8.h"
#include "src/logging/tracing-flags.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/foreign.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/instance-type.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/objects.h"
#include "src/objects/slots-inl.h"
#include "src/objects/smi.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/snapshot/shared-heap-serializer.h"
#include "src/tasks/cancelable-task.h"
#include "src/tracing/tracing-category-observer.h"
#include "src/utils/utils-inl.h"

namespace v8 {
namespace internal {

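// Tri-color marking bit patterns: white objects have not been reached yet,
// grey objects have been reached but their bodies are not yet scanned, and
// black objects are fully scanned. The "01" pattern must never occur in a
// valid marking bitmap.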
const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";

// The following has to hold in order for {MarkingState::MarkBitFrom} not to
// produce an invalid {kImpossibleBitPattern} in the marking bitmap by
// overlapping mark bits.
STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);

// =============================================================================
// Verifiers
// =============================================================================

#ifdef VERIFY_HEAP
namespace {

class MarkingVerifier : public ObjectVisitorWithCageBases, public RootVisitor {
 public:
  virtual void Run() = 0;

 protected:
  explicit MarkingVerifier(Heap* heap)
      : ObjectVisitorWithCageBases(heap), heap_(heap) {}

  virtual ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const MemoryChunk* chunk) = 0;

  virtual void VerifyMap(Map map) = 0;
  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
  virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
  virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;

  virtual bool IsMarked(HeapObject object) = 0;

  virtual bool IsBlackOrGrey(HeapObject object) = 0;

  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    VerifyCodePointer(slot);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    VerifyRootPointers(start, end);
  }

  void VisitMapPointer(HeapObject object) override {
    VerifyMap(object.map(cage_base()));
  }

  void VerifyRoots();
  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
  void VerifyMarking(NewSpace* new_space);
  void VerifyMarking(PagedSpace* paged_space);
  void VerifyMarking(LargeObjectSpace* lo_space);

  Heap* heap_;
};

void MarkingVerifier::VerifyRoots() {
  heap_->IterateRootsIncludingClients(this,
                                      base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}

void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
                                          Address end) {
  Address next_object_must_be_here_or_later = start;

  for (auto object_and_size :
       LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
    HeapObject object = object_and_size.first;
    size_t size = object_and_size.second;
    Address current = object.address();
    if (current < start) continue;
    if (current >= end) break;
    CHECK(IsMarked(object));
    CHECK(current >= next_object_must_be_here_or_later);
    object.Iterate(cage_base(), this);
    next_object_must_be_here_or_later = current + size;
    // The object is either part of a black area of black allocation or a
    // regular black object.
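    // That is, either every mark bit covering the object's extent is set (the
    // object lies in a black allocation area), or only the two bits at the
    // object's start are set and the remaining bits are clear (a regular
    // black object). The check below accepts exactly these two shapes.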
    CHECK(bitmap(page)->AllBitsSetInRange(
              page->AddressToMarkbitIndex(current),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
          bitmap(page)->AllBitsClearInRange(
              page->AddressToMarkbitIndex(current + kTaggedSize * 2),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
    current = next_object_must_be_here_or_later;
  }
}

void MarkingVerifier::VerifyMarking(NewSpace* space) {
  if (!space) return;
  Address end = space->top();
  // The bottom position is at the start of its page. This allows us to use
  // page->area_start() as the start of the range on all pages.
  CHECK_EQ(space->first_allocatable_address(),
           space->first_page()->area_start());

  PageRange range(space->first_allocatable_address(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarkingOnPage(page, page->area_start(), limit);
  }
}

void MarkingVerifier::VerifyMarking(PagedSpace* space) {
  for (Page* p : *space) {
    VerifyMarkingOnPage(p, p->area_start(), p->area_end());
  }
}

void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
  if (!lo_space) return;
  LargeObjectSpaceObjectIterator it(lo_space);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    if (IsBlackOrGrey(obj)) {
      obj.Iterate(cage_base(), this);
    }
  }
}

class FullMarkingVerifier : public MarkingVerifier {
 public:
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {}

  void Run() override {
    VerifyRoots();
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->new_lo_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    if (heap_->map_space()) VerifyMarking(heap_->map_space());
    VerifyMarking(heap_->lo_space());
    VerifyMarking(heap_->code_lo_space());
  }

 protected:
  ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
      const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject object) override {
    return marking_state_->IsBlack(object);
  }

  bool IsBlackOrGrey(HeapObject object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }

  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }

  void VerifyCodePointer(CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    Object maybe_code = slot.load(code_cage_base());
    HeapObject code;
    // The slot might contain a smi during CodeDataContainer creation, so skip
    // it.
    if (maybe_code.GetHeapObject(&code)) {
      VerifyHeapObjectImpl(code);
    }
  }

  void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }

  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VerifyHeapObjectImpl(target);
  }

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
    HeapObject target_object = rinfo->target_object(cage_base());
    if (!host.IsWeakObject(target_object)) {
      VerifyHeapObjectImpl(target_object);
    }
  }

 private:
  V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
    if (heap_->IsShared() !=
        BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
      return;

    if (heap_->ShouldBeInSharedOldSpace(heap_object)) {
      CHECK(heap_->SharedHeapContains(heap_object));
    }

    CHECK(marking_state_->IsBlackOrGrey(heap_object));
  }

  template <typename TSlot>
  V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
    for (TSlot slot = start; slot < end; ++slot) {
      typename TSlot::TObject object = slot.load(cage_base());
      HeapObject heap_object;
      if (object.GetHeapObjectIfStrong(&heap_object)) {
        VerifyHeapObjectImpl(heap_object);
      }
    }
  }

  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class EvacuationVerifier : public ObjectVisitorWithCageBases,
                           public RootVisitor {
 public:
  virtual void Run() = 0;

  void VisitPointers(HeapObject host, ObjectSlot start,
                     ObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    VerifyCodePointer(slot);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    VerifyRootPointers(start, end);
  }

  void VisitMapPointer(HeapObject object) override {
    VerifyMap(object.map(cage_base()));
  }

 protected:
  explicit EvacuationVerifier(Heap* heap)
      : ObjectVisitorWithCageBases(heap), heap_(heap) {}

  inline Heap* heap() { return heap_; }

  virtual void VerifyMap(Map map) = 0;
  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
  virtual void VerifyCodePointer(CodeObjectSlot slot) = 0;
  virtual void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) = 0;

  void VerifyRoots();
  void VerifyEvacuationOnPage(Address start, Address end);
  void VerifyEvacuation(NewSpace* new_space);
  void VerifyEvacuation(PagedSpace* paged_space);

  Heap* heap_;
};

void EvacuationVerifier::VerifyRoots() {
  heap_->IterateRootsIncludingClients(this,
                                      base::EnumSet<SkipRoot>{SkipRoot::kWeak});
}

void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
  Address current = start;
  while (current < end) {
    HeapObject object = HeapObject::FromAddress(current);
    if (!object.IsFreeSpaceOrFiller(cage_base())) {
      object.Iterate(cage_base(), this);
    }
    current += object.Size(cage_base());
  }
}

void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
  if (!space) return;
  PageRange range(space->first_allocatable_address(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    VerifyEvacuationOnPage(current, limit);
  }
}

void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    if (p->Contains(space->top())) {
      CodePageMemoryModificationScope memory_modification_scope(p);
      heap_->CreateFillerObjectAt(
          space->top(), static_cast<int>(space->limit() - space->top()),
          ClearRecordedSlots::kNo);
    }
    VerifyEvacuationOnPage(p->area_start(), p->area_end());
  }
}

class FullEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}

  void Run() override {
    DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
    VerifyRoots();
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
  }

 protected:
  V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
    if (heap_->IsShared() !=
        BasicMemoryChunk::FromHeapObject(heap_object)->InSharedHeap())
      return;

    CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
                  Heap::InToPage(heap_object));
    CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(heap_object));
  }

  template <typename TSlot>
  void VerifyPointersImpl(TSlot start, TSlot end) {
    for (TSlot current = start; current < end; ++current) {
      typename TSlot::TObject object = current.load(cage_base());
      HeapObject heap_object;
      if (object.GetHeapObjectIfStrong(&heap_object)) {
        VerifyHeapObjectImpl(heap_object);
      }
    }
  }
  void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }
  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }
  void VerifyCodePointer(CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    Object maybe_code = slot.load(code_cage_base());
    HeapObject code;
    // The slot might contain a smi during CodeDataContainer creation, so skip
    // it.
    if (maybe_code.GetHeapObject(&code)) {
      VerifyHeapObjectImpl(code);
    }
  }
  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    VerifyHeapObjectImpl(target);
  }
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
  }
  void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
    VerifyPointersImpl(start, end);
  }
};

}  // namespace
#endif  // VERIFY_HEAP

// =============================================================================
// MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
// =============================================================================

namespace {

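// Returns the number of threads usable for parallel GC work: the platform's
// worker threads plus the main thread. The value is cached on first use and
// is expected to stay constant afterwards.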
int NumberOfAvailableCores() {
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  // This number of cores should be greater than zero and never change.
  DCHECK_GE(num_cores, 1);
  DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
  return num_cores;
}

}  // namespace

int MarkCompactCollectorBase::NumberOfParallelCompactionTasks() {
  int tasks = FLAG_parallel_compaction ? NumberOfAvailableCores() : 1;
  if (!heap_->CanPromoteYoungAndExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

MarkCompactCollector::MarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
#ifdef DEBUG
      state_(IDLE),
#endif
      is_shared_heap_(heap->IsShared()),
      marking_state_(heap->isolate()),
      non_atomic_marking_state_(heap->isolate()),
      sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
}

MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }

void MarkCompactCollector::SetUp() {
  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}

void MarkCompactCollector::TearDown() {
  AbortCompaction();
  if (heap()->incremental_marking()->IsMarking()) {
    local_marking_worklists()->Publish();
    heap()->marking_barrier()->Publish();
    // Marking barriers of LocalHeaps will be published in their destructors.
    marking_worklists()->Clear();
    local_weak_objects()->Publish();
    weak_objects()->Clear();
  }
  sweeper()->TearDown();
}

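// Checks whether |map| is a Map, following the forwarding pointer in case the
// Map object itself has already been relocated.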
// static
bool MarkCompactCollector::IsMapOrForwardedMap(Map map) {
  MapWord map_word = map.map_word(kRelaxedLoad);

  if (map_word.IsForwardingAddress()) {
    return map_word.ToForwardingAddress().IsMap();
  } else {
    return map_word.ToMap().IsMap();
  }
}

void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());

  if (FLAG_trace_evacuation_candidates) {
    PrintIsolate(
        isolate(),
        "Evacuation candidate: Free bytes: %6zu. Free Lists length: %4d.\n",
        p->area_size() - p->allocated_bytes(), p->FreeListsLength());
  }

  p->MarkEvacuationCandidate();
  evacuation_candidates_.push_back(p);
}

static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}

bool MarkCompactCollector::StartCompaction(StartCompactionMode mode) {
  DCHECK(!compacting_);
  DCHECK(evacuation_candidates_.empty());

  // Bailouts for completely disabled compaction.
  if (!FLAG_compact ||
      (mode == StartCompactionMode::kAtomic && !heap()->IsGCWithoutStack() &&
       !FLAG_compact_with_stack) ||
      (FLAG_gc_experiment_less_compaction && !heap_->ShouldReduceMemory())) {
    return false;
  }

  CollectEvacuationCandidates(heap()->old_space());

  if (heap()->map_space() && FLAG_compact_maps) {
    CollectEvacuationCandidates(heap()->map_space());
  }

  if (FLAG_compact_code_space &&
      (heap()->IsGCWithoutStack() || FLAG_compact_code_space_with_stack)) {
    CollectEvacuationCandidates(heap()->code_space());
  } else if (FLAG_trace_fragmentation) {
    TraceFragmentation(heap()->code_space());
  }

  if (FLAG_trace_fragmentation && heap()->map_space()) {
    TraceFragmentation(heap()->map_space());
  }

  compacting_ = !evacuation_candidates_.empty();
  return compacting_;
}

void MarkCompactCollector::StartMarking() {
  std::vector<Address> contexts =
      heap()->memory_measurement()->StartProcessing();
  if (FLAG_stress_per_context_marking_worklist) {
    contexts.clear();
    HandleScope handle_scope(heap()->isolate());
    for (auto context : heap()->FindAllNativeContexts()) {
      contexts.push_back(context->ptr());
    }
  }
  code_flush_mode_ = Heap::GetCodeFlushMode(isolate());
  marking_worklists()->CreateContextWorklists(contexts);
  auto* cpp_heap = CppHeap::From(heap_->cpp_heap());
  local_marking_worklists_ = std::make_unique<MarkingWorklists::Local>(
      marking_worklists(),
      cpp_heap ? cpp_heap->CreateCppMarkingStateForMutatorThread()
               : MarkingWorklists::Local::kNoCppMarkingState);
  local_weak_objects_ = std::make_unique<WeakObjects::Local>(weak_objects());
  marking_visitor_ = std::make_unique<MarkingVisitor>(
      marking_state(), local_marking_worklists(), local_weak_objects_.get(),
      heap_, epoch(), code_flush_mode(),
      heap_->local_embedder_heap_tracer()->InUse(),
      heap_->ShouldCurrentGCKeepAgesUnchanged());
  // Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif  // VERIFY_HEAP
}

void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

  MarkLiveObjects();
  ClearNonLiveReferences();
  VerifyMarking();
  heap()->memory_measurement()->FinishProcessing(native_context_stats_);
  RecordObjectStats();

  StartSweepSpaces();
  Evacuate();
  Finish();
}

#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(ReadOnlySpace* space) {
  ReadOnlyHeapObjectIterator iterator(space);
  for (HeapObject object = iterator.Next(); !object.is_null();
       object = iterator.Next()) {
    CHECK(non_atomic_marking_state()->IsBlack(object));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  if (!space) return;
  for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
  if (!space) return;
  LargeObjectSpaceObjectIterator it(space);
  for (HeapObject obj = it.Next(); !obj.is_null(); obj = it.Next()) {
    CHECK(non_atomic_marking_state()->IsWhite(obj));
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
                    MemoryChunk::FromHeapObject(obj)));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  if (heap_->map_space()) {
    VerifyMarkbitsAreClean(heap_->map_space());
  }
  VerifyMarkbitsAreClean(heap_->new_space());
  // Read-only space should always be black since we never collect any objects
  // in it or linked from it.
  VerifyMarkbitsAreDirty(heap_->read_only_space());
  VerifyMarkbitsAreClean(heap_->lo_space());
  VerifyMarkbitsAreClean(heap_->code_lo_space());
  VerifyMarkbitsAreClean(heap_->new_lo_space());
}

#endif  // VERIFY_HEAP

void MarkCompactCollector::FinishSweepingIfOutOfWork() {
  if (sweeper()->sweeping_in_progress() && FLAG_concurrent_sweeping &&
      !sweeper()->AreSweeperTasksRunning()) {
    // At this point we know that all concurrent sweeping tasks have run
    // out of work and quit: all pages are swept. The main thread still needs
    // to complete sweeping though.
    EnsureSweepingCompleted(SweepingForcedFinalizationMode::kV8Only);
  }
  if (heap()->cpp_heap()) {
    // Ensure that sweeping is also completed for the C++ managed heap, if one
    // exists and it's out of work.
    CppHeap::From(heap()->cpp_heap())->FinishSweepingIfOutOfWork();
  }
}

void MarkCompactCollector::EnsureSweepingCompleted(
    SweepingForcedFinalizationMode mode) {
  if (sweeper()->sweeping_in_progress()) {
    TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
                   ThreadKind::kMain);

    sweeper()->EnsureCompleted();
    heap()->old_space()->RefillFreeList();
    heap()->code_space()->RefillFreeList();
    if (heap()->map_space()) {
      heap()->map_space()->RefillFreeList();
      heap()->map_space()->SortFreeList();
    }

    heap()->tracer()->NotifySweepingCompleted();

#ifdef VERIFY_HEAP
    if (FLAG_verify_heap && !evacuation()) {
      FullEvacuationVerifier verifier(heap());
      verifier.Run();
    }
#endif
  }

  if (mode == SweepingForcedFinalizationMode::kUnifiedHeap &&
      heap()->cpp_heap()) {
    // Ensure that sweeping is also completed for the C++ managed heap, if one
    // exists.
    CppHeap::From(heap()->cpp_heap())->FinishSweepingIfRunning();
    DCHECK(
        !CppHeap::From(heap()->cpp_heap())->sweeper().IsSweepingInProgress());
  }

  DCHECK_IMPLIES(mode == SweepingForcedFinalizationMode::kUnifiedHeap ||
                     !heap()->cpp_heap(),
                 !heap()->tracer()->IsSweepingInProgress());
}

void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
  sweeper()->EnsurePageIsSwept(page);
}

void MarkCompactCollector::DrainSweepingWorklistForSpace(
    AllocationSpace space) {
  if (!sweeper()->sweeping_in_progress()) return;
  sweeper()->DrainSweepingWorklistForSpace(space);
}

void MarkCompactCollector::ComputeEvacuationHeuristics(
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  // For the memory-reducing and optimize-for-memory modes we directly define
  // both constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start and switch to a trace-based (using compaction speed)
  // approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  // Time to take for a single area (= payload of a page). Used as soon as
  // there exist enough compaction speed samples.
  const float kTargetMsPerArea = .5;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
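      // E.g., assuming an area of ~250 KB and a measured compaction speed of
      // 1 MB/ms, estimated_ms_per_area is ~1.25 ms, which yields a target
      // fragmentation of roughly 100 - 100 * 0.5 / 1.25 = 60%.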
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}

void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE ||
         space->identity() == MAP_SPACE);

  int number_of_pages = space->CountTotalPages();
  size_t area_size = space->AreaSize();

  const bool in_standard_path =
      !(FLAG_manual_evacuation_candidates_selection ||
        FLAG_stress_compaction_random || FLAG_stress_compaction ||
        FLAG_compact_on_every_full_gc);
  // These variables are only initialized if |in_standard_path| and are not
  // used otherwise.
  size_t max_evacuated_bytes;
  int target_fragmentation_percent;
  size_t free_bytes_threshold;
  if (in_standard_path) {
    // We use two conditions to decide whether a page qualifies as an
    // evacuation candidate:
    // * Target fragmentation: how fragmented the page is, i.e., the ratio
    //   between live bytes and the capacity of the page (= area).
    // * Evacuation quota: a global quota determining how many bytes should be
    //   compacted.
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);
    free_bytes_threshold = target_fragmentation_percent * (area_size / 100);
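    // E.g., assuming a 70% target fragmentation and a page area of ~250 KB,
    // only pages with more than ~175 KB of free space qualify below.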
  }

  // Pairs of (live_bytes_in_page, page).
  using LiveBytesPagePair = std::pair<size_t, Page*>;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  DCHECK(!sweeping_in_progress());
  Page* owner_of_linear_allocation_area =
      space->top() == space->limit()
          ? nullptr
          : Page::FromAllocationAreaAddress(space->top());
  for (Page* p : *space) {
    if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
        !p->CanAllocate())
      continue;

    if (p->IsPinned()) {
      DCHECK(
          !p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING));
      continue;
    }

    // Invariant: Evacuation candidates are only created when marking starts.
    // This means that sweeping has finished. Furthermore, at the end of a GC
    // all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    if (in_standard_path) {
      // Only pages with more than |free_bytes_threshold| free bytes are
      // considered for evacuation.
      if (area_size - p->allocated_bytes() >= free_bytes_threshold) {
        pages.push_back(std::make_pair(p->allocated_bytes(), p));
      }
    } else {
      pages.push_back(std::make_pair(p->allocated_bytes(), p));
    }

    // Unpin pages for the next GC.
    if (p->IsFlagSet(MemoryChunk::PINNED)) {
      p->ClearFlag(MemoryChunk::PINNED);
    }
  }

  int candidate_count = 0;
  size_t total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction_random) {
    double fraction = isolate()->fuzzer_rng()->NextDouble();
    size_t pages_to_mark_count =
        static_cast<size_t>(fraction * (pages.size() + 1));
    for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
             pages.size(), pages_to_mark_count)) {
      candidate_count++;
      total_live_bytes += pages[i].first;
      AddEvacuationCandidate(pages[i].second);
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // The following approach determines the pages that should be evacuated.
    //
    // Sort pages from the most free to the least free, then select
    // the first n pages for evacuation such that:
    // - the total size of evacuated objects does not exceed the specified
    //   limit.
    // - fragmentation of the (n+1)-th page does not exceed the specified
    //   limit.
    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      size_t live_bytes = pages[i].first;
      DCHECK_GE(area_size, live_bytes);
      if (FLAG_compact_on_every_full_gc ||
          ((total_live_bytes + live_bytes) <= max_evacuated_bytes)) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%zu "
                     "fragmentation_limit_kb=%zu "
                     "fragmentation_limit_percent=%d sum_compaction_kb=%zu "
                     "compaction_limit_kb=%zu\n",
                     space->name(), (area_size - live_bytes) / KB,
                     free_bytes_threshold / KB, target_fragmentation_percent,
                     total_live_bytes / KB, max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages =
        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
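    // If compaction would not release at least one page, copying the live
    // objects would only churn memory without reducing the footprint, so
    // compaction is skipped.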
    if ((estimated_released_pages == 0) && !FLAG_compact_on_every_full_gc) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%zu\n",
                 space->name(), reduce_memory, candidate_count,
                 total_live_bytes / KB);
  }
}

void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    if (V8_EXTERNAL_CODE_SPACE_BOOL) {
      RememberedSet<OLD_TO_CODE>::ClearAll(heap());
    }
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.clear();
  }
  DCHECK(evacuation_candidates_.empty());
}

void MarkCompactCollector::Prepare() {
#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!sweeping_in_progress());

  if (!heap()->incremental_marking()->IsMarking()) {
    const auto embedder_flags = heap_->flags_for_embedder_tracer();
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
      // PrepareForTrace should be called before visitor initialization in
      // StartMarking.
      heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
    }
    StartCompaction(StartCompactionMode::kAtomic);
    StartMarking();
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
      // TracePrologue immediately starts marking, which requires the V8
      // worklists to be set up.
      heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
    }
  }

  heap_->FreeLinearAllocationAreas();

  PagedSpaceIterator spaces(heap());
  for (PagedSpace* space = spaces.Next(); space != nullptr;
       space = spaces.Next()) {
    space->PrepareForMarkCompact();
  }

  // All objects are guaranteed to be initialized in the atomic pause.
  if (heap()->new_lo_space()) {
    heap()->new_lo_space()->ResetPendingObject();
  }

  if (heap()->new_space()) {
    DCHECK_EQ(heap()->new_space()->top(),
              heap()->new_space()->original_top_acquire());
  }
}

void MarkCompactCollector::FinishConcurrentMarking() {
  // FinishConcurrentMarking is called for both concurrent and parallel
  // marking. It is safe to call this function when tasks are already finished.
  if (FLAG_parallel_marking || FLAG_concurrent_marking) {
    heap()->concurrent_marking()->Join();
    heap()->concurrent_marking()->FlushMemoryChunkData(
        non_atomic_marking_state());
    heap()->concurrent_marking()->FlushNativeContexts(&native_context_stats_);
  }
  if (auto* cpp_heap = CppHeap::From(heap_->cpp_heap())) {
    cpp_heap->FinishConcurrentMarkingIfNeeded();
  }
}

void MarkCompactCollector::VerifyMarking() {
  CHECK(local_marking_worklists()->IsEmpty());
  DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    FullMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap()->old_space()->VerifyLiveBytes();
    if (heap()->map_space()) heap()->map_space()->VerifyLiveBytes();
    heap()->code_space()->VerifyLiveBytes();
  }
#endif
}

void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

  SweepArrayBufferExtensions();

#ifdef DEBUG
  heap()->VerifyCountersBeforeConcurrentSweeping();
#endif

  marking_visitor_.reset();
  local_marking_worklists_.reset();
  marking_worklists_.ReleaseContextWorklists();
  native_context_stats_.Clear();

  CHECK(weak_objects_.current_ephemerons.IsEmpty());
  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
  local_weak_objects_->next_ephemerons_local.Publish();
  local_weak_objects_.reset();
  weak_objects_.next_ephemerons.Clear();

  sweeper()->StartSweeperTasks();
  sweeper()->StartIterabilityTasks();

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub caches are not traversed during GC; clear them to force
  // their lazy re-initialization. This must be done after the
  // GC, because it relies on the new address of certain old space
  // objects (empty string, illegal builtin).
  isolate()->load_stub_cache()->Clear();
  isolate()->store_stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}

void MarkCompactCollector::SweepArrayBufferExtensions() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH_SWEEP_ARRAY_BUFFERS);
  heap_->array_buffer_sweeper()->RequestSweep(
      ArrayBufferSweeper::SweepingType::kFull);
}

class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector), is_shared_heap_(collector->is_shared_heap()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) final {
    DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
    MarkObjectByPointer(root, p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) final {
    for (FullObjectSlot p = start; p < end; ++p) {
      MarkObjectByPointer(root, p);
    }
  }

  void VisitRunningCode(FullObjectSlot p) final {
    Code code = Code::cast(*p);

    // If Code is currently executing, then we must not remove its
    // deoptimization literals, which it might need in order to successfully
    // deoptimize.
    //
    // Must match behavior in RootsReferencesExtractor::VisitRunningCode, so
    // that heap snapshots accurately describe the roots.
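    // Baseline code has no deoptimization literals to keep alive, so only
    // non-baseline code is inspected here.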
    if (code.kind() != CodeKind::BASELINE) {
      DeoptimizationData deopt_data =
          DeoptimizationData::cast(code.deoptimization_data());
      if (deopt_data.length() > 0) {
        DeoptimizationLiteralArray literals = deopt_data.LiteralArray();
        int literals_length = literals.length();
        for (int i = 0; i < literals_length; ++i) {
          MaybeObject maybe_literal = literals.Get(i);
          HeapObject heap_literal;
          if (maybe_literal.GetHeapObject(&heap_literal)) {
            MarkObjectByPointer(Root::kStackRoots,
                                FullObjectSlot(&heap_literal));
          }
        }
      }
    }

    // And then mark the Code itself.
    VisitRootPointer(Root::kStackRoots, nullptr, p);
  }

 private:
  V8_INLINE void MarkObjectByPointer(Root root, FullObjectSlot p) {
    Object object = *p;
    if (!object.IsHeapObject()) return;
    HeapObject heap_object = HeapObject::cast(object);
    BasicMemoryChunk* target_page =
        BasicMemoryChunk::FromHeapObject(heap_object);
    if (is_shared_heap_ != target_page->InSharedHeap()) return;
    collector_->MarkRootObject(root, heap_object);
  }

  MarkCompactCollector* const collector_;
  const bool is_shared_heap_;
};

// This visitor is used to visit the body of special objects held alive by
// other roots.
//
// It is currently used for
// - Code held alive by the top optimized frame. This code cannot be
//   deoptimized and thus has to be kept alive in an isolated way: it should
//   not keep alive other code objects reachable through the weak list, but it
//   should keep alive its embedded pointers (which would otherwise be
//   dropped).
// - Prefix of the string table.
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
    : public ObjectVisitorWithCageBases {
 public:
  explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
      : ObjectVisitorWithCageBases(collector->isolate()),
        collector_(collector) {}

  void VisitPointer(HeapObject host, ObjectSlot p) final {
    MarkObject(host, p.load(cage_base()));
  }

  void VisitMapPointer(HeapObject host) final {
    MarkObject(host, host.map(cage_base()));
  }

  void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) {
      // The map slot should be handled in VisitMapPointer.
      DCHECK_NE(host.map_slot(), p);
      DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
      MarkObject(host, p.load(cage_base()));
    }
  }

  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    MarkObject(host, slot.load(code_cage_base()));
  }

  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) final {
    // At the moment, custom roots cannot contain weak pointers.
    UNREACHABLE();
  }

  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    MarkObject(host, target);
  }

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    MarkObject(host, rinfo->target_object(cage_base()));
  }

 private:
  V8_INLINE void MarkObject(HeapObject host, Object object) {
    if (!object.IsHeapObject()) return;
    HeapObject heap_object = HeapObject::cast(object);
    // We use this visitor both in client and shared GCs. The client GC should
    // not mark objects in the shared heap. In shared GCs we are marking each
    // client's top stack frame, so it is actually legal to encounter
    // references into the client heap here in a shared GC. We need to bail
    // out in these cases as well.
    if (collector_->is_shared_heap() != heap_object.InSharedHeap()) return;
    collector_->MarkObject(host, heap_object);
  }

  MarkCompactCollector* const collector_;
};

class MarkCompactCollector::SharedHeapObjectVisitor final
    : public ObjectVisitorWithCageBases {
 public:
  explicit SharedHeapObjectVisitor(MarkCompactCollector* collector)
      : ObjectVisitorWithCageBases(collector->isolate()),
        collector_(collector) {}

  void VisitPointer(HeapObject host, ObjectSlot p) final {
    MarkObject(host, p, p.load(cage_base()));
  }

  void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
    MaybeObject object = p.load(cage_base());
    HeapObject heap_object;
    if (object.GetHeapObject(&heap_object))
      MarkObject(host, ObjectSlot(p), heap_object);
  }

  void VisitMapPointer(HeapObject host) final {
    MarkObject(host, host.map_slot(), host.map(cage_base()));
  }

  void VisitPointers(HeapObject host, ObjectSlot start, ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) {
      // The map slot should be handled in VisitMapPointer.
      DCHECK_NE(host.map_slot(), p);
      DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
      MarkObject(host, p, p.load(cage_base()));
    }
  }

  void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
    CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
    MarkObject(host, ObjectSlot(slot.address()), slot.load(code_cage_base()));
  }

  void VisitPointers(HeapObject host, MaybeObjectSlot start,
                     MaybeObjectSlot end) final {
    for (MaybeObjectSlot p = start; p < end; ++p) {
      // The map slot should be handled in VisitMapPointer.
      DCHECK_NE(host.map_slot(), ObjectSlot(p));
      VisitPointer(host, p);
    }
  }

  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    RecordRelocSlot(host, rinfo, target);
  }

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    HeapObject target = rinfo->target_object(cage_base());
    RecordRelocSlot(host, rinfo, target);
  }

 private:
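  // Records a reference from a client-heap object into the shared heap in the
  // OLD_TO_SHARED remembered set and marks the shared target as a root of the
  // shared GC.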
MarkObject(HeapObject host,ObjectSlot slot,Object object)1289 V8_INLINE void MarkObject(HeapObject host, ObjectSlot slot, Object object) {
1290 DCHECK(!host.InSharedHeap());
1291 if (!object.IsHeapObject()) return;
1292 HeapObject heap_object = HeapObject::cast(object);
1293 if (!heap_object.InSharedHeap()) return;
1294 RememberedSet<OLD_TO_SHARED>::Insert<AccessMode::NON_ATOMIC>(
1295 MemoryChunk::FromHeapObject(host), slot.address());
1296 collector_->MarkRootObject(Root::kClientHeap, heap_object);
1297 }
1298
RecordRelocSlot(Code host,RelocInfo * rinfo,HeapObject target)1299 V8_INLINE void RecordRelocSlot(Code host, RelocInfo* rinfo,
1300 HeapObject target) {
1301 if (ShouldRecordRelocSlot(host, rinfo, target)) {
1302 RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
1303 RememberedSet<OLD_TO_SHARED>::InsertTyped(info.memory_chunk,
1304 info.slot_type, info.offset);
1305 }
1306 }
1307
ShouldRecordRelocSlot(Code host,RelocInfo * rinfo,HeapObject target)1308 V8_INLINE bool ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
1309 HeapObject target) {
1310 return BasicMemoryChunk::FromHeapObject(target)->InSharedHeap();
1311 }
1312
1313 MarkCompactCollector* const collector_;
1314 };
1315
1316 class InternalizedStringTableCleaner : public RootVisitor {
1317 public:
InternalizedStringTableCleaner(Heap * heap)1318 explicit InternalizedStringTableCleaner(Heap* heap)
1319 : heap_(heap), pointers_removed_(0) {}
1320
VisitRootPointers(Root root,const char * description,FullObjectSlot start,FullObjectSlot end)1321 void VisitRootPointers(Root root, const char* description,
1322 FullObjectSlot start, FullObjectSlot end) override {
1323 UNREACHABLE();
1324 }
1325
VisitRootPointers(Root root,const char * description,OffHeapObjectSlot start,OffHeapObjectSlot end)1326 void VisitRootPointers(Root root, const char* description,
1327 OffHeapObjectSlot start,
1328 OffHeapObjectSlot end) override {
1329 DCHECK_EQ(root, Root::kStringTable);
1330 // Visit all HeapObject pointers in [start, end).
1331 MarkCompactCollector::NonAtomicMarkingState* marking_state =
1332 heap_->mark_compact_collector()->non_atomic_marking_state();
1333 Isolate* isolate = heap_->isolate();
1334 for (OffHeapObjectSlot p = start; p < end; ++p) {
1335 Object o = p.load(isolate);
1336 if (o.IsHeapObject()) {
1337 HeapObject heap_object = HeapObject::cast(o);
1338 DCHECK(!Heap::InYoungGeneration(heap_object));
1339 if (marking_state->IsWhite(heap_object)) {
1340 pointers_removed_++;
1341 // Set the entry to the_hole_value (as deleted).
1342 p.store(StringTable::deleted_element());
1343 }
1344 }
1345 }
1346 }
1347
PointersRemoved()1348 int PointersRemoved() { return pointers_removed_; }
1349
1350 private:
1351 Heap* heap_;
1352 int pointers_removed_;
1353 };
1354
1355 class ExternalStringTableCleaner : public RootVisitor {
1356 public:
ExternalStringTableCleaner(Heap * heap)1357 explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
1358
VisitRootPointers(Root root,const char * description,FullObjectSlot start,FullObjectSlot end)1359 void VisitRootPointers(Root root, const char* description,
1360 FullObjectSlot start, FullObjectSlot end) override {
1361 // Visit all HeapObject pointers in [start, end).
1362 MarkCompactCollector::NonAtomicMarkingState* marking_state =
1363 heap_->mark_compact_collector()->non_atomic_marking_state();
1364 Object the_hole = ReadOnlyRoots(heap_).the_hole_value();
1365 for (FullObjectSlot p = start; p < end; ++p) {
1366 Object o = *p;
1367 if (o.IsHeapObject()) {
1368 HeapObject heap_object = HeapObject::cast(o);
1369 if (marking_state->IsWhite(heap_object)) {
1370 if (o.IsExternalString()) {
1371 heap_->FinalizeExternalString(String::cast(o));
1372 } else {
1373 // The original external string may have been internalized.
1374 DCHECK(o.IsThinString());
1375 }
1376 // Set the entry to the_hole_value (as deleted).
1377 p.store(the_hole);
1378 }
1379 }
1380 }
1381 }
1382
1383 private:
1384 Heap* heap_;
1385 };
1386
1387 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1388 // are retained.
1389 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1390 public:
MarkCompactWeakObjectRetainer(MarkCompactCollector::NonAtomicMarkingState * marking_state)1391 explicit MarkCompactWeakObjectRetainer(
1392 MarkCompactCollector::NonAtomicMarkingState* marking_state)
1393 : marking_state_(marking_state) {}
1394
RetainAs(Object object)1395 Object RetainAs(Object object) override {
1396 HeapObject heap_object = HeapObject::cast(object);
1397 DCHECK(!marking_state_->IsGrey(heap_object));
1398 if (marking_state_->IsBlack(heap_object)) {
1399 return object;
1400 } else if (object.IsAllocationSite() &&
1401 !(AllocationSite::cast(object).IsZombie())) {
1402 // "dead" AllocationSites need to live long enough for a traversal of new
1403 // space. These sites get a one-time reprieve.
1404
1405 Object nested = object;
1406 while (nested.IsAllocationSite()) {
1407 AllocationSite current_site = AllocationSite::cast(nested);
1408 // MarkZombie will override the nested_site, read it first before
1409 // marking
1410 nested = current_site.nested_site();
1411 current_site.MarkZombie();
1412 marking_state_->WhiteToBlack(current_site);
1413 }
1414
1415 return object;
1416 } else {
1417 return Object();
1418 }
1419 }
1420
1421 private:
1422 MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1423 };
1424
1425 class RecordMigratedSlotVisitor : public ObjectVisitorWithCageBases {
1426 public:
RecordMigratedSlotVisitor(MarkCompactCollector * collector,EphemeronRememberedSet * ephemeron_remembered_set)1427 explicit RecordMigratedSlotVisitor(
1428 MarkCompactCollector* collector,
1429 EphemeronRememberedSet* ephemeron_remembered_set)
1430 : ObjectVisitorWithCageBases(collector->isolate()),
1431 collector_(collector),
1432 ephemeron_remembered_set_(ephemeron_remembered_set) {}
1433
VisitPointer(HeapObject host,ObjectSlot p)1434 inline void VisitPointer(HeapObject host, ObjectSlot p) final {
1435 DCHECK(!HasWeakHeapObjectTag(p.load(cage_base())));
1436 RecordMigratedSlot(host, MaybeObject::FromObject(p.load(cage_base())),
1437 p.address());
1438 }
1439
VisitMapPointer(HeapObject host)1440 inline void VisitMapPointer(HeapObject host) final {
1441 VisitPointer(host, host.map_slot());
1442 }
1443
1444 inline void VisitPointer(HeapObject host, MaybeObjectSlot p) final {
1445 DCHECK(!MapWord::IsPacked(p.Relaxed_Load(cage_base()).ptr()));
1446 RecordMigratedSlot(host, p.load(cage_base()), p.address());
1447 }
1448
1449 inline void VisitPointers(HeapObject host, ObjectSlot start,
1450 ObjectSlot end) final {
1451 while (start < end) {
1452 VisitPointer(host, start);
1453 ++start;
1454 }
1455 }
1456
1457 inline void VisitPointers(HeapObject host, MaybeObjectSlot start,
1458 MaybeObjectSlot end) final {
1459 while (start < end) {
1460 VisitPointer(host, start);
1461 ++start;
1462 }
1463 }
1464
1465 inline void VisitCodePointer(HeapObject host, CodeObjectSlot slot) final {
1466 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
1467 // This code is similar to the implementation of VisitPointer() modulo the
1468 // new kind of slot.
1469 DCHECK(!HasWeakHeapObjectTag(slot.load(code_cage_base())));
1470 Object code = slot.load(code_cage_base());
1471 RecordMigratedSlot(host, MaybeObject::FromObject(code), slot.address());
1472 }
1473
1474 inline void VisitEphemeron(HeapObject host, int index, ObjectSlot key,
1475 ObjectSlot value) override {
1476 DCHECK(host.IsEphemeronHashTable());
1477 DCHECK(!Heap::InYoungGeneration(host));
1478
1479 VisitPointer(host, value);
1480
1481 if (ephemeron_remembered_set_ && Heap::InYoungGeneration(*key)) {
1482 auto table = EphemeronHashTable::unchecked_cast(host);
1483 auto insert_result =
1484 ephemeron_remembered_set_->insert({table, std::unordered_set<int>()});
1485 insert_result.first->second.insert(index);
1486 } else {
1487 VisitPointer(host, key);
1488 }
1489 }
1490
1491 inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1492 DCHECK_EQ(host, rinfo->host());
1493 DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1494 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1495 // The target is always in old space, so we don't have to record the slot
1496 // in the old-to-new remembered set.
1497 DCHECK(!Heap::InYoungGeneration(target));
1498 collector_->RecordRelocSlot(host, rinfo, target);
1499 }
1500
1501 inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1502 DCHECK_EQ(host, rinfo->host());
1503 DCHECK(RelocInfo::IsEmbeddedObjectMode(rinfo->rmode()));
1504 HeapObject object = rinfo->target_object(cage_base());
1505 GenerationalBarrierForCode(host, rinfo, object);
1506 collector_->RecordRelocSlot(host, rinfo, object);
1507 }
1508
1509 // Entries that are skipped for recording.
1510 inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1511 inline void VisitExternalReference(Foreign host, Address* p) final {}
1512 inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1513 inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1514
1515 virtual void MarkArrayBufferExtensionPromoted(HeapObject object) {}
1516
1517 protected:
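// Records the migrated slot in the appropriate remembered set of the host's
// chunk: old-to-new when the target lives in the young generation, and
// old-to-old (or old-to-code when the target sits on an executable page and
// the external code space is enabled) when the target is on an evacuation
// candidate.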
1518 inline virtual void RecordMigratedSlot(HeapObject host, MaybeObject value,
1519 Address slot) {
1520 if (value->IsStrongOrWeak()) {
1521 BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
1522 if (p->InYoungGeneration()) {
1523 DCHECK_IMPLIES(
1524 p->IsToPage(),
1525 p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
1526
1527 MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
1528 DCHECK(chunk->SweepingDone());
1529 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
1530 } else if (p->IsEvacuationCandidate()) {
1531 if (V8_EXTERNAL_CODE_SPACE_BOOL &&
1532 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
1533 RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
1534 MemoryChunk::FromHeapObject(host), slot);
1535 } else {
1536 RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1537 MemoryChunk::FromHeapObject(host), slot);
1538 }
1539 }
1540 }
1541 }
1542
1543 MarkCompactCollector* collector_;
1544 EphemeronRememberedSet* ephemeron_remembered_set_;
1545 };
1546
1547 class MigrationObserver {
1548 public:
1549 explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1550
1551 virtual ~MigrationObserver() = default;
1552 virtual void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1553 int size) = 0;
1554
1555 protected:
1556 Heap* heap_;
1557 };
1558
1559 class ProfilingMigrationObserver final : public MigrationObserver {
1560 public:
1561 explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1562
1563 inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
1564 int size) final {
1565 if (dest == CODE_SPACE || (dest == OLD_SPACE && dst.IsBytecodeArray())) {
1566 PROFILE(heap_->isolate(),
1567 CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1568 }
1569 heap_->OnMoveEvent(dst, src, size);
1570 }
1571 };
1572
1573 class HeapObjectVisitor {
1574 public:
1575 virtual ~HeapObjectVisitor() = default;
1576 virtual bool Visit(HeapObject object, int size) = 0;
1577 };
1578
1579 class EvacuateVisitorBase : public HeapObjectVisitor {
1580 public:
1581 void AddObserver(MigrationObserver* observer) {
1582 migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1583 observers_.push_back(observer);
1584 }
1585
1586 protected:
1587 enum MigrationMode { kFast, kObserved };
1588
1589 PtrComprCageBase cage_base() {
1590 #if V8_COMPRESS_POINTERS
1591 return PtrComprCageBase{heap_->isolate()};
1592 #else
1593 return PtrComprCageBase{};
1594 #endif // V8_COMPRESS_POINTERS
1595 }
1596
1597 using MigrateFunction = void (*)(EvacuateVisitorBase* base, HeapObject dst,
1598 HeapObject src, int size,
1599 AllocationSpace dest);
1600
1601 template <MigrationMode mode>
1602 static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject dst,
1603 HeapObject src, int size, AllocationSpace dest) {
1604 Address dst_addr = dst.address();
1605 Address src_addr = src.address();
1606 PtrComprCageBase cage_base = base->cage_base();
1607 DCHECK(base->heap_->AllowedToBeMigrated(src.map(cage_base), src, dest));
1608 DCHECK_NE(dest, LO_SPACE);
1609 DCHECK_NE(dest, CODE_LO_SPACE);
1610 if (dest == OLD_SPACE) {
1611 DCHECK_OBJECT_SIZE(size);
1612 DCHECK(IsAligned(size, kTaggedSize));
1613 base->heap_->CopyBlock(dst_addr, src_addr, size);
1614 if (mode != MigrationMode::kFast)
1615 base->ExecuteMigrationObservers(dest, src, dst, size);
1616 // In case the object's map gets relocated during GC we load the old map
1617 // here. This is fine since they store the same content.
1618 dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1619 if (V8_UNLIKELY(FLAG_minor_mc)) {
1620 base->record_visitor_->MarkArrayBufferExtensionPromoted(dst);
1621 }
1622 } else if (dest == MAP_SPACE) {
1623 DCHECK_OBJECT_SIZE(size);
1624 DCHECK(IsAligned(size, kTaggedSize));
1625 base->heap_->CopyBlock(dst_addr, src_addr, size);
1626 if (mode != MigrationMode::kFast)
1627 base->ExecuteMigrationObservers(dest, src, dst, size);
1628 dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1629 } else if (dest == CODE_SPACE) {
1630 DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1631 base->heap_->CopyBlock(dst_addr, src_addr, size);
1632 Code code = Code::cast(dst);
1633 code.Relocate(dst_addr - src_addr);
1634 if (mode != MigrationMode::kFast)
1635 base->ExecuteMigrationObservers(dest, src, dst, size);
1636 // In case the object's map gets relocated during GC we load the old map
1637 // here. This is fine since they store the same content.
1638 dst.IterateFast(dst.map(cage_base), size, base->record_visitor_);
1639 } else {
1640 DCHECK_OBJECT_SIZE(size);
1641 DCHECK(dest == NEW_SPACE);
1642 base->heap_->CopyBlock(dst_addr, src_addr, size);
1643 if (mode != MigrationMode::kFast)
1644 base->ExecuteMigrationObservers(dest, src, dst, size);
1645 }
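// Leave a forwarding address in the source object's map word so that slot
// updating can later find the new location of the migrated object.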
1646 src.set_map_word(MapWord::FromForwardingAddress(dst), kRelaxedStore);
1647 }
1648
1649 EvacuateVisitorBase(Heap* heap, EvacuationAllocator* local_allocator,
1650 ConcurrentAllocator* shared_old_allocator,
1651 RecordMigratedSlotVisitor* record_visitor)
1652 : heap_(heap),
1653 local_allocator_(local_allocator),
1654 shared_old_allocator_(shared_old_allocator),
1655 record_visitor_(record_visitor),
1656 shared_string_table_(shared_old_allocator != nullptr) {
1657 migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1658 }
1659
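// Attempts to allocate a copy of |object| in |target_space| (or in the
// shared heap's old space for in-place-internalizable strings when a shared
// string table is in use) and migrates it there. Returns false if the
// allocation fails or, in debug builds with --stress-compaction, if
// evacuation of this object is deliberately aborted.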
1660 inline bool TryEvacuateObject(AllocationSpace target_space, HeapObject object,
1661 int size, HeapObject* target_object) {
1662 #ifdef DEBUG
1663 if (FLAG_stress_compaction && AbortCompactionForTesting(object))
1664 return false;
1665 #endif // DEBUG
1666 Map map = object.map(cage_base());
1667 AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
1668 AllocationResult allocation;
1669 if (ShouldPromoteIntoSharedHeap(map)) {
1670 DCHECK_EQ(target_space, OLD_SPACE);
1671 DCHECK(Heap::InYoungGeneration(object));
1672 DCHECK_NOT_NULL(shared_old_allocator_);
1673 allocation = shared_old_allocator_->AllocateRaw(size, alignment,
1674 AllocationOrigin::kGC);
1675 } else {
1676 allocation = local_allocator_->Allocate(target_space, size,
1677 AllocationOrigin::kGC, alignment);
1678 }
1679 if (allocation.To(target_object)) {
1680 MigrateObject(*target_object, object, size, target_space);
1681 if (target_space == CODE_SPACE)
1682 MemoryChunk::FromHeapObject(*target_object)
1683 ->GetCodeObjectRegistry()
1684 ->RegisterNewlyAllocatedCodeObject((*target_object).address());
1685 return true;
1686 }
1687 return false;
1688 }
1689
1690 inline bool ShouldPromoteIntoSharedHeap(Map map) {
1691 if (shared_string_table_) {
1692 return String::IsInPlaceInternalizableExcludingExternal(
1693 map.instance_type());
1694 }
1695 return false;
1696 }
1697
1698 inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject src,
1699 HeapObject dst, int size) {
1700 for (MigrationObserver* obs : observers_) {
1701 obs->Move(dest, src, dst, size);
1702 }
1703 }
1704
1705 inline void MigrateObject(HeapObject dst, HeapObject src, int size,
1706 AllocationSpace dest) {
1707 migration_function_(this, dst, src, size, dest);
1708 }
1709
1710 #ifdef DEBUG
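// With --stress-compaction, evacuation is aborted for objects whose offset
// within their page matches a mask derived from --random-seed. A page flag
// toggles the behavior so a subsequent attempt on the same page proceeds.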
1711 bool AbortCompactionForTesting(HeapObject object) {
1712 if (FLAG_stress_compaction) {
1713 const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1714 kPageAlignmentMask & ~kObjectAlignmentMask;
1715 if ((object.ptr() & kPageAlignmentMask) == mask) {
1716 Page* page = Page::FromHeapObject(object);
1717 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1718 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1719 } else {
1720 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1721 return true;
1722 }
1723 }
1724 }
1725 return false;
1726 }
1727 #endif // DEBUG
1728
1729 Heap* heap_;
1730 EvacuationAllocator* local_allocator_;
1731 ConcurrentAllocator* shared_old_allocator_;
1732 RecordMigratedSlotVisitor* record_visitor_;
1733 std::vector<MigrationObserver*> observers_;
1734 MigrateFunction migration_function_;
1735 bool shared_string_table_ = false;
1736 };
1737
1738 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1739 public:
1740 explicit EvacuateNewSpaceVisitor(
1741 Heap* heap, EvacuationAllocator* local_allocator,
1742 ConcurrentAllocator* shared_old_allocator,
1743 RecordMigratedSlotVisitor* record_visitor,
1744 Heap::PretenuringFeedbackMap* local_pretenuring_feedback,
1745 AlwaysPromoteYoung always_promote_young)
1746 : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
1747 record_visitor),
1748 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1749 promoted_size_(0),
1750 semispace_copied_size_(0),
1751 local_pretenuring_feedback_(local_pretenuring_feedback),
1752 is_incremental_marking_(heap->incremental_marking()->IsMarking()),
1753 always_promote_young_(always_promote_young) {}
1754
1755 inline bool Visit(HeapObject object, int size) override {
1756 if (TryEvacuateWithoutCopy(object)) return true;
1757 HeapObject target_object;
1758
1759 if (always_promote_young_ == AlwaysPromoteYoung::kYes) {
1760 heap_->UpdateAllocationSite(object.map(), object,
1761 local_pretenuring_feedback_);
1762
1763 if (!TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1764 heap_->FatalProcessOutOfMemory(
1765 "MarkCompactCollector: young object promotion failed");
1766 }
1767
1768 promoted_size_ += size;
1769 return true;
1770 }
1771
1772 if (heap_->ShouldBePromoted(object.address()) &&
1773 TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1774 promoted_size_ += size;
1775 return true;
1776 }
1777
1778 heap_->UpdateAllocationSite(object.map(), object,
1779 local_pretenuring_feedback_);
1780
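// Not promoted: copy the object within new space, falling back to old space
// if the semi-space allocation fails.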
1781 HeapObject target;
1782 AllocationSpace space = AllocateTargetObject(object, size, &target);
1783 MigrateObject(HeapObject::cast(target), object, size, space);
1784 semispace_copied_size_ += size;
1785 return true;
1786 }
1787
1788 intptr_t promoted_size() { return promoted_size_; }
1789 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1790
1791 private:
1792 inline bool TryEvacuateWithoutCopy(HeapObject object) {
1793 if (is_incremental_marking_) return false;
1794
1795 Map map = object.map();
1796
1797 // Some objects can be evacuated without creating a copy.
1798 if (map.visitor_id() == kVisitThinString) {
1799 HeapObject actual = ThinString::cast(object).unchecked_actual();
1800 if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1801 object.set_map_word(MapWord::FromForwardingAddress(actual),
1802 kRelaxedStore);
1803 return true;
1804 }
1805 // TODO(mlippautz): Handle ConsString.
1806
1807 return false;
1808 }
1809
1810 inline AllocationSpace AllocateTargetObject(HeapObject old_object, int size,
1811 HeapObject* target_object) {
1812 AllocationAlignment alignment =
1813 HeapObject::RequiredAlignment(old_object.map());
1814 AllocationSpace space_allocated_in = NEW_SPACE;
1815 AllocationResult allocation = local_allocator_->Allocate(
1816 NEW_SPACE, size, AllocationOrigin::kGC, alignment);
1817 if (allocation.IsFailure()) {
1818 allocation = AllocateInOldSpace(size, alignment);
1819 space_allocated_in = OLD_SPACE;
1820 }
1821 bool ok = allocation.To(target_object);
1822 DCHECK(ok);
1823 USE(ok);
1824 return space_allocated_in;
1825 }
1826
1827 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1828 AllocationAlignment alignment) {
1829 AllocationResult allocation = local_allocator_->Allocate(
1830 OLD_SPACE, size_in_bytes, AllocationOrigin::kGC, alignment);
1831 if (allocation.IsFailure()) {
1832 heap_->FatalProcessOutOfMemory(
1833 "MarkCompactCollector: semi-space copy, fallback in old gen");
1834 }
1835 return allocation;
1836 }
1837
1838 LocalAllocationBuffer buffer_;
1839 intptr_t promoted_size_;
1840 intptr_t semispace_copied_size_;
1841 Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1842 bool is_incremental_marking_;
1843 AlwaysPromoteYoung always_promote_young_;
1844 };
1845
1846 template <PageEvacuationMode mode>
1847 class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1848 public:
1849 explicit EvacuateNewSpacePageVisitor(
1850 Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1851 Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1852 : heap_(heap),
1853 record_visitor_(record_visitor),
1854 moved_bytes_(0),
1855 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1856
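// Moves an entire page at once instead of copying individual objects: either
// within new space (NEW_TO_NEW) or by converting the page to an old-space
// page (NEW_TO_OLD).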
1857 static void Move(Page* page) {
1858 switch (mode) {
1859 case NEW_TO_NEW:
1860 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1861 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1862 break;
1863 case NEW_TO_OLD: {
1864 page->heap()->new_space()->from_space().RemovePage(page);
1865 Page* new_page = Page::ConvertNewToOld(page);
1866 DCHECK(!new_page->InYoungGeneration());
1867 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1868 break;
1869 }
1870 }
1871 }
1872
1873 inline bool Visit(HeapObject object, int size) override {
1874 if (mode == NEW_TO_NEW) {
1875 heap_->UpdateAllocationSite(object.map(), object,
1876 local_pretenuring_feedback_);
1877 } else if (mode == NEW_TO_OLD) {
1878 DCHECK_IMPLIES(V8_EXTERNAL_CODE_SPACE_BOOL, !IsCodeSpaceObject(object));
1879 PtrComprCageBase cage_base = GetPtrComprCageBase(object);
1880 object.IterateFast(cage_base, record_visitor_);
1881 if (V8_UNLIKELY(FLAG_minor_mc)) {
1882 record_visitor_->MarkArrayBufferExtensionPromoted(object);
1883 }
1884 }
1885 return true;
1886 }
1887
1888 intptr_t moved_bytes() { return moved_bytes_; }
1889 void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1890
1891 private:
1892 Heap* heap_;
1893 RecordMigratedSlotVisitor* record_visitor_;
1894 intptr_t moved_bytes_;
1895 Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1896 };
1897
1898 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1899 public:
1900 EvacuateOldSpaceVisitor(Heap* heap, EvacuationAllocator* local_allocator,
1901 ConcurrentAllocator* shared_old_allocator,
1902 RecordMigratedSlotVisitor* record_visitor)
1903 : EvacuateVisitorBase(heap, local_allocator, shared_old_allocator,
1904 record_visitor) {}
1905
1906 inline bool Visit(HeapObject object, int size) override {
1907 HeapObject target_object;
1908 if (TryEvacuateObject(Page::FromHeapObject(object)->owner_identity(),
1909 object, size, &target_object)) {
1910 DCHECK(object.map_word(heap_->isolate(), kRelaxedLoad)
1911 .IsForwardingAddress());
1912 return true;
1913 }
1914 return false;
1915 }
1916 };
1917
1918 class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1919 public:
1920 explicit EvacuateRecordOnlyVisitor(Heap* heap)
1921 : heap_(heap)
1922 #ifdef V8_COMPRESS_POINTERS
1923 ,
1924 cage_base_(heap->isolate())
1925 #endif // V8_COMPRESS_POINTERS
1926 {
1927 }
1928
1929 // The pointer compression cage base value used for decompression of all
1930 // tagged values except references to Code objects.
1931 V8_INLINE PtrComprCageBase cage_base() const {
1932 #ifdef V8_COMPRESS_POINTERS
1933 return cage_base_;
1934 #else
1935 return PtrComprCageBase{};
1936 #endif // V8_COMPRESS_POINTERS
1937 }
1938
1939 inline bool Visit(HeapObject object, int size) override {
1940 RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector(),
1941 &heap_->ephemeron_remembered_set_);
1942 Map map = object.map(cage_base());
1943 // Instead of calling object.IterateBodyFast(cage_base(), &visitor) here
1944 // we can shortcut and use the precomputed size value passed to the visitor.
1945 DCHECK_EQ(object.SizeFromMap(map), size);
1946 object.IterateBodyFast(map, size, &visitor);
1947 return true;
1948 }
1949
1950 private:
1951 Heap* heap_;
1952 #ifdef V8_COMPRESS_POINTERS
1953 const PtrComprCageBase cage_base_;
1954 #endif // V8_COMPRESS_POINTERS
1955 };
1956
1957 bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, FullObjectSlot p) {
1958 Object o = *p;
1959 if (!o.IsHeapObject()) return false;
1960 HeapObject heap_object = HeapObject::cast(o);
1961 return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1962 heap_object);
1963 }
1964
1965 void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1966 ObjectVisitor* custom_root_body_visitor) {
1967 // Mark the heap roots including global variables, stack variables,
1968 // etc., and all objects reachable from them.
1969 heap()->IterateRootsIncludingClients(
1970 root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kWeak});
1971
1972 // Custom marking for top optimized frame.
1973 ProcessTopOptimizedFrame(custom_root_body_visitor, isolate());
1974
1975 if (isolate()->is_shared()) {
1976 isolate()->global_safepoint()->IterateClientIsolates(
1977 [this, custom_root_body_visitor](Isolate* client) {
1978 ProcessTopOptimizedFrame(custom_root_body_visitor, client);
1979 });
1980 }
1981 }
1982
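// When this is the shared isolate, iterate every object in the client
// isolates' heaps so that their references into the shared heap get marked.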
1983 void MarkCompactCollector::MarkObjectsFromClientHeaps() {
1984 if (!isolate()->is_shared()) return;
1985
1986 SharedHeapObjectVisitor visitor(this);
1987
1988 isolate()->global_safepoint()->IterateClientIsolates(
1989 [&visitor](Isolate* client) {
1990 Heap* heap = client->heap();
1991 HeapObjectIterator iterator(heap, HeapObjectIterator::kNoFiltering);
1992 PtrComprCageBase cage_base(client);
1993 for (HeapObject obj = iterator.Next(); !obj.is_null();
1994 obj = iterator.Next()) {
1995 obj.IterateFast(cage_base, &visitor);
1996 }
1997 });
1998 }
1999
2000 void MarkCompactCollector::VisitObject(HeapObject obj) {
2001 marking_visitor_->Visit(obj.map(), obj);
2002 }
2003
2004 void MarkCompactCollector::RevisitObject(HeapObject obj) {
2005 DCHECK(marking_state()->IsBlack(obj));
2006 DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
2007 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
2008 MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
2009 marking_visitor_->Visit(obj.map(marking_visitor_->cage_base()), obj);
2010 }
2011
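// Alternates between draining the marking worklists and processing ephemerons
// until no further ephemeron values get marked. Returns false if no fixpoint
// is reached within FLAG_ephemeron_fixpoint_iterations, so the caller can fall
// back to the linear algorithm.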
2012 bool MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
2013 int iterations = 0;
2014 int max_iterations = FLAG_ephemeron_fixpoint_iterations;
2015
2016 bool another_ephemeron_iteration_main_thread;
2017
2018 do {
2019 PerformWrapperTracing();
2020
2021 if (iterations >= max_iterations) {
2022 // Give up fixpoint iteration and switch to linear algorithm.
2023 return false;
2024 }
2025
2026 // Move ephemerons from next_ephemerons into current_ephemerons to
2027 // drain them in this iteration.
2028 DCHECK(
2029 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2030 weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2031 heap()->concurrent_marking()->set_another_ephemeron_iteration(false);
2032
2033 {
2034 TRACE_GC(heap()->tracer(),
2035 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2036
2037 if (FLAG_parallel_marking) {
2038 heap_->concurrent_marking()->RescheduleJobIfNeeded(
2039 TaskPriority::kUserBlocking);
2040 }
2041
2042 another_ephemeron_iteration_main_thread = ProcessEphemerons();
2043 FinishConcurrentMarking();
2044 }
2045
2046 CHECK(
2047 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2048 CHECK(local_weak_objects()
2049 ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2050
2051 ++iterations;
2052 } while (another_ephemeron_iteration_main_thread ||
2053 heap()->concurrent_marking()->another_ephemeron_iteration() ||
2054 !local_marking_worklists()->IsEmpty() ||
2055 !local_marking_worklists()->IsWrapperEmpty() ||
2056 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
2057
2058 CHECK(local_marking_worklists()->IsEmpty());
2059 CHECK(local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2060 CHECK(local_weak_objects()
2061 ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2062 return true;
2063 }
2064
2065 bool MarkCompactCollector::ProcessEphemerons() {
2066 Ephemeron ephemeron;
2067 bool another_ephemeron_iteration = false;
2068
2069 // Drain current_ephemerons and push ephemerons where key and value are still
2070 // unreachable into next_ephemerons.
2071 while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2072 if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
2073 another_ephemeron_iteration = true;
2074 }
2075 }
2076
2077 // Drain marking worklist and push discovered ephemerons into
2078 // discovered_ephemerons.
2079 size_t objects_processed;
2080 std::tie(std::ignore, objects_processed) = ProcessMarkingWorklist(0);
2081
2082 // As soon as a single object has been processed and has potentially marked
2083 // another object, we need another iteration. Otherwise we might fail to
2084 // apply ephemeron semantics to it.
2085 if (objects_processed > 0) another_ephemeron_iteration = true;
2086
2087 // Drain discovered_ephemerons (filled in the drain MarkingWorklist-phase
2088 // before) and push ephemerons where key and value are still unreachable into
2089 // next_ephemerons.
2090 while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
2091 if (ProcessEphemeron(ephemeron.key, ephemeron.value)) {
2092 another_ephemeron_iteration = true;
2093 }
2094 }
2095
2096 // Flush local ephemerons for main task to global pool.
2097 local_weak_objects()->ephemeron_hash_tables_local.Publish();
2098 local_weak_objects()->next_ephemerons_local.Publish();
2099
2100 return another_ephemeron_iteration;
2101 }
2102
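// Linear fallback: remembers the key->value pairs of unresolved ephemerons
// and, whenever a key is newly discovered to be live, marks the associated
// values directly instead of re-scanning all ephemerons each iteration.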
2103 void MarkCompactCollector::ProcessEphemeronsLinear() {
2104 TRACE_GC(heap()->tracer(),
2105 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
2106 CHECK(heap()->concurrent_marking()->IsStopped());
2107 std::unordered_multimap<HeapObject, HeapObject, Object::Hasher> key_to_values;
2108 Ephemeron ephemeron;
2109
2110 DCHECK(
2111 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2112 weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2113 while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2114 ProcessEphemeron(ephemeron.key, ephemeron.value);
2115
2116 if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
2117 key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
2118 }
2119 }
2120
2121 ephemeron_marking_.newly_discovered_limit = key_to_values.size();
2122 bool work_to_do = true;
2123
2124 while (work_to_do) {
2125 PerformWrapperTracing();
2126
2127 ResetNewlyDiscovered();
2128 ephemeron_marking_.newly_discovered_limit = key_to_values.size();
2129
2130 {
2131 TRACE_GC(heap()->tracer(),
2132 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
2133 // Drain marking worklist and push all discovered objects into
2134 // newly_discovered.
2135 ProcessMarkingWorklist<
2136 MarkCompactCollector::MarkingWorklistProcessingMode::
2137 kTrackNewlyDiscoveredObjects>(0);
2138 }
2139
2140 while (local_weak_objects()->discovered_ephemerons_local.Pop(&ephemeron)) {
2141 ProcessEphemeron(ephemeron.key, ephemeron.value);
2142
2143 if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
2144 key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
2145 }
2146 }
2147
2148 if (ephemeron_marking_.newly_discovered_overflowed) {
2149 // If newly_discovered was overflowed just visit all ephemerons in
2150 // next_ephemerons.
2151 local_weak_objects()->next_ephemerons_local.Publish();
2152 weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
2153 if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
2154 non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
2155 local_marking_worklists()->Push(ephemeron.value);
2156 }
2157 });
2158
2159 } else {
2160 // This is the good case: newly_discovered stores all discovered
2161 // objects. Now use key_to_values to see if discovered objects keep more
2162 // objects alive due to ephemeron semantics.
2163 for (HeapObject object : ephemeron_marking_.newly_discovered) {
2164 auto range = key_to_values.equal_range(object);
2165 for (auto it = range.first; it != range.second; ++it) {
2166 HeapObject value = it->second;
2167 MarkObject(object, value);
2168 }
2169 }
2170 }
2171
2172 // Do NOT drain marking worklist here, otherwise the current checks
2173 // for work_to_do are not sufficient for determining if another iteration
2174 // is necessary.
2175
2176 work_to_do = !local_marking_worklists()->IsEmpty() ||
2177 !local_marking_worklists()->IsWrapperEmpty() ||
2178 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
2179 CHECK(local_weak_objects()
2180 ->discovered_ephemerons_local.IsLocalAndGlobalEmpty());
2181 }
2182
2183 ResetNewlyDiscovered();
2184 ephemeron_marking_.newly_discovered.shrink_to_fit();
2185
2186 CHECK(local_marking_worklists()->IsEmpty());
2187 CHECK(weak_objects_.current_ephemerons.IsEmpty());
2188 CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
2189
2190 // Flush local ephemerons for main task to global pool.
2191 local_weak_objects()->ephemeron_hash_tables_local.Publish();
2192 local_weak_objects()->next_ephemerons_local.Publish();
2193 }
2194
2195 void MarkCompactCollector::PerformWrapperTracing() {
2196 if (heap_->local_embedder_heap_tracer()->InUse()) {
2197 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
2198 if (local_marking_worklists()->PublishWrapper()) {
2199 DCHECK(local_marking_worklists()->IsWrapperEmpty());
2200 } else {
2201 // Cannot directly publish wrapper objects.
2202 LocalEmbedderHeapTracer::ProcessingScope scope(
2203 heap_->local_embedder_heap_tracer());
2204 HeapObject object;
2205 while (local_marking_worklists()->PopWrapper(&object)) {
2206 scope.TracePossibleWrapper(JSObject::cast(object));
2207 }
2208 }
2209 heap_->local_embedder_heap_tracer()->Trace(
2210 std::numeric_limits<double>::infinity());
2211 }
2212 }
2213
2214 void MarkCompactCollector::DrainMarkingWorklist() { ProcessMarkingWorklist(0); }
2215
2216 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
2217 std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist(
2218 size_t bytes_to_process) {
2219 HeapObject object;
2220 size_t bytes_processed = 0;
2221 size_t objects_processed = 0;
2222 bool is_per_context_mode = local_marking_worklists()->IsPerContextMode();
2223 Isolate* isolate = heap()->isolate();
2224 PtrComprCageBase cage_base(isolate);
2225 while (local_marking_worklists()->Pop(&object) ||
2226 local_marking_worklists()->PopOnHold(&object)) {
2227 // Left trimming may result in grey or black filler objects on the marking
2228 // worklist. Ignore these objects.
2229 if (object.IsFreeSpaceOrFiller(cage_base)) {
2230 // Due to copying mark bits and the fact that grey and black have their
2231 // first bit set, one word fillers are always black.
2232 DCHECK_IMPLIES(object.map(cage_base) ==
2233 ReadOnlyRoots(isolate).one_pointer_filler_map(),
2234 marking_state()->IsBlack(object));
2235 // Other fillers may be black or grey depending on the color of the object
2236 // that was trimmed.
2237 DCHECK_IMPLIES(object.map(cage_base) !=
2238 ReadOnlyRoots(isolate).one_pointer_filler_map(),
2239 marking_state()->IsBlackOrGrey(object));
2240 continue;
2241 }
2242 DCHECK(object.IsHeapObject());
2243 DCHECK(heap()->Contains(object));
2244 DCHECK(!(marking_state()->IsWhite(object)));
2245 if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
2246 kTrackNewlyDiscoveredObjects) {
2247 AddNewlyDiscovered(object);
2248 }
2249 Map map = object.map(cage_base);
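// In per-context mode, attribute the visited bytes to the native context
// inferred for the object, for per-context memory measurement.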
2250 if (is_per_context_mode) {
2251 Address context;
2252 if (native_context_inferrer_.Infer(isolate, map, object, &context)) {
2253 local_marking_worklists()->SwitchToContext(context);
2254 }
2255 }
2256 size_t visited_size = marking_visitor_->Visit(map, object);
2257 if (is_per_context_mode) {
2258 native_context_stats_.IncrementSize(local_marking_worklists()->Context(),
2259 map, object, visited_size);
2260 }
2261 bytes_processed += visited_size;
2262 objects_processed++;
2263 if (bytes_to_process && bytes_processed >= bytes_to_process) {
2264 break;
2265 }
2266 }
2267 return std::make_pair(bytes_processed, objects_processed);
2268 }
2269
2270 // Generate definitions for use in other files.
2271 template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
2272 MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>(
2273 size_t bytes_to_process);
2274 template std::pair<size_t, size_t> MarkCompactCollector::ProcessMarkingWorklist<
2275 MarkCompactCollector::MarkingWorklistProcessingMode::
2276 kTrackNewlyDiscoveredObjects>(size_t bytes_to_process);
2277
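// Marks the ephemeron's value if its key is already marked, returning true if
// the value was newly marked. If both key and value are still white, the pair
// is deferred to next_ephemerons for a later iteration.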
2278 bool MarkCompactCollector::ProcessEphemeron(HeapObject key, HeapObject value) {
2279 if (marking_state()->IsBlackOrGrey(key)) {
2280 if (marking_state()->WhiteToGrey(value)) {
2281 local_marking_worklists()->Push(value);
2282 return true;
2283 }
2284
2285 } else if (marking_state()->IsWhite(value)) {
2286 local_weak_objects()->next_ephemerons_local.Push(Ephemeron{key, value});
2287 }
2288 return false;
2289 }
2290
2291 void MarkCompactCollector::ProcessEphemeronMarking() {
2292 DCHECK(local_marking_worklists()->IsEmpty());
2293
2294 // Incremental marking might leave ephemerons in the main task's local
2295 // buffer; flush them into the global pool.
2296 local_weak_objects()->next_ephemerons_local.Publish();
2297
2298 if (!ProcessEphemeronsUntilFixpoint()) {
2299 // Fixpoint iteration needed too many iterations and was cancelled. Use the
2300 // guaranteed linear algorithm.
2301 ProcessEphemeronsLinear();
2302 }
2303
2304 #ifdef VERIFY_HEAP
2305 if (FLAG_verify_heap) {
2306 Ephemeron ephemeron;
2307
2308 DCHECK(
2309 local_weak_objects()->current_ephemerons_local.IsLocalAndGlobalEmpty());
2310 weak_objects_.current_ephemerons.Swap(&weak_objects_.next_ephemerons);
2311 while (local_weak_objects()->current_ephemerons_local.Pop(&ephemeron)) {
2312 CHECK(!ProcessEphemeron(ephemeron.key, ephemeron.value));
2313 }
2314 }
2315 #endif
2316
2317 CHECK(local_marking_worklists()->IsEmpty());
2318 CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
2319 }
2320
2321 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor,
2322 Isolate* isolate) {
2323 for (StackFrameIterator it(isolate, isolate->thread_local_top()); !it.done();
2324 it.Advance()) {
2325 if (it.frame()->is_unoptimized()) return;
2326 if (it.frame()->type() == StackFrame::OPTIMIZED) {
2327 Code code = it.frame()->LookupCode();
2328 if (!code.CanDeoptAt(isolate, it.frame()->pc())) {
2329 PtrComprCageBase cage_base(isolate);
2330 Code::BodyDescriptor::IterateBody(code.map(cage_base), code, visitor);
2331 }
2332 return;
2333 }
2334 }
2335 }
2336
2337 void MarkCompactCollector::RecordObjectStats() {
2338 if (V8_LIKELY(!TracingFlags::is_gc_stats_enabled())) return;
2339 // Cannot run during bootstrapping due to incomplete objects.
2340 if (isolate()->bootstrapper()->IsActive()) return;
2341 heap()->CreateObjectStats();
2342 ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
2343 heap()->dead_object_stats_.get());
2344 collector.Collect();
2345 if (V8_UNLIKELY(TracingFlags::gc_stats.load(std::memory_order_relaxed) &
2346 v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
2347 std::stringstream live, dead;
2348 heap()->live_object_stats_->Dump(live);
2349 heap()->dead_object_stats_->Dump(dead);
2350 TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
2351 "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
2352 "live", TRACE_STR_COPY(live.str().c_str()), "dead",
2353 TRACE_STR_COPY(dead.str().c_str()));
2354 }
2355 if (FLAG_trace_gc_object_stats) {
2356 heap()->live_object_stats_->PrintJSON("live");
2357 heap()->dead_object_stats_->PrintJSON("dead");
2358 }
2359 heap()->live_object_stats_->CheckpointObjectStats();
2360 heap()->dead_object_stats_->ClearObjectStats();
2361 }
2362
2363 void MarkCompactCollector::MarkLiveObjects() {
2364 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
2365 // The recursive GC marker detects when it is nearing stack overflow,
2366 // and switches to a different marking system. JS interrupts interfere
2367 // with the C stack limit check.
2368 PostponeInterruptsScope postpone(isolate());
2369
2370 bool was_marked_incrementally = false;
2371 {
2372 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
2373 if (heap_->incremental_marking()->Stop()) {
2374 MarkingBarrier::PublishAll(heap());
2375 was_marked_incrementally = true;
2376 }
2377 }
2378
2379 #ifdef DEBUG
2380 DCHECK(state_ == PREPARE_GC);
2381 state_ = MARK_LIVE_OBJECTS;
2382 #endif
2383
2384 heap_->local_embedder_heap_tracer()->EnterFinalPause();
2385
2386 RootMarkingVisitor root_visitor(this);
2387
2388 {
2389 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
2390 CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
2391 MarkRoots(&root_visitor, &custom_root_body_visitor);
2392 }
2393
2394 {
2395 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_CLIENT_HEAPS);
2396 MarkObjectsFromClientHeaps();
2397 }
2398
2399 {
2400 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
2401 if (FLAG_parallel_marking) {
2402 heap_->concurrent_marking()->RescheduleJobIfNeeded(
2403 TaskPriority::kUserBlocking);
2404 }
2405 DrainMarkingWorklist();
2406
2407 FinishConcurrentMarking();
2408 DrainMarkingWorklist();
2409 }
2410
2411 {
2412 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
2413
2414 DCHECK(local_marking_worklists()->IsEmpty());
2415
2416 // Mark objects reachable through the embedder heap. This phase is
2417 // opportunistic as it may not discover graphs that are only reachable
2418 // through ephemerons.
2419 {
2420 TRACE_GC(heap()->tracer(),
2421 GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
2422 do {
2423 // PerformWrapperTracing() also empties the work items collected by
2424 // concurrent markers. As a result this call needs to happen at least
2425 // once.
2426 PerformWrapperTracing();
2427 DrainMarkingWorklist();
2428 } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
2429 !local_marking_worklists()->IsWrapperEmpty());
2430 DCHECK(local_marking_worklists()->IsWrapperEmpty());
2431 DCHECK(local_marking_worklists()->IsEmpty());
2432 }
2433
2434 // The objects reachable from the roots are marked, yet unreachable objects
2435 // are unmarked. Mark objects reachable due to embedder heap tracing or
2436 // harmony weak maps.
2437 {
2438 TRACE_GC(heap()->tracer(),
2439 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
2440 ProcessEphemeronMarking();
2441 DCHECK(local_marking_worklists()->IsEmpty());
2442 }
2443
2444 // The objects reachable from the roots, weak maps, and embedder heap
2445 // tracing are marked. Objects pointed to only by weak global handles cannot
2446 // be immediately reclaimed. Instead, we have to mark them as pending and
2447 // mark objects reachable from them.
2448 //
2449 // First we identify nonlive weak handles and mark them as pending
2450 // destruction.
2451 {
2452 TRACE_GC(heap()->tracer(),
2453 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
2454 heap()->isolate()->global_handles()->IterateWeakRootsIdentifyFinalizers(
2455 &IsUnmarkedHeapObject);
2456 DrainMarkingWorklist();
2457 }
2458
2459 // Process finalizers, effectively keeping them alive until the next
2460 // garbage collection.
2461 {
2462 TRACE_GC(heap()->tracer(),
2463 GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
2464 heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
2465 &root_visitor);
2466 DrainMarkingWorklist();
2467 }
2468
2469 // Repeat ephemeron processing from the newly marked objects.
2470 {
2471 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
2472 ProcessEphemeronMarking();
2473 DCHECK(local_marking_worklists()->IsWrapperEmpty());
2474 DCHECK(local_marking_worklists()->IsEmpty());
2475 }
2476
2477 // We depend on IterateWeakRootsForPhantomHandles being called before
2478 // ProcessOldCodeCandidates in order to identify flushed bytecode in the
2479 // CPU profiler.
2480 {
2481 heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
2482 &IsUnmarkedHeapObject);
2483 }
2484 }
2485
2486 if (was_marked_incrementally) {
2487 // Disable the marking barrier after concurrent/parallel marking has
2488 // finished as it will reset page flags that share the same bitmap as
2489 // the evacuation candidate bit.
2490 MarkingBarrier::DeactivateAll(heap());
2491 GlobalHandles::DisableMarkingBarrier(heap()->isolate());
2492 }
2493
2494 epoch_++;
2495 }
2496
2497 void MarkCompactCollector::ClearNonLiveReferences() {
2498 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
2499
2500 if (isolate()->OwnsStringTable()) {
2501 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
2502
2503 // Prune the string table removing all strings only pointed to by the
2504 // string table. Cannot use string_table() here because the string
2505 // table is marked.
2506 StringTable* string_table = isolate()->string_table();
2507 InternalizedStringTableCleaner internalized_visitor(heap());
2508 string_table->DropOldData();
2509 string_table->IterateElements(&internalized_visitor);
2510 string_table->NotifyElementsRemoved(internalized_visitor.PointersRemoved());
2511 }
2512
2513 ExternalStringTableCleaner external_visitor(heap());
2514 heap()->external_string_table_.IterateAll(&external_visitor);
2515 heap()->external_string_table_.CleanUpAll();
2516
2517 {
2518 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHABLE_BYTECODE);
2519 // ProcessFlushedBaselineCandidates should be called after clearing bytecode
2520 // so that any bytecode that needs flushing has already been flushed and we
2521 // can correctly set the code object on the JSFunction.
2522 ProcessOldCodeCandidates();
2523 ProcessFlushedBaselineCandidates();
2524 }
2525
2526 {
2527 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_FLUSHED_JS_FUNCTIONS);
2528 ClearFlushedJsFunctions();
2529 }
2530
2531 {
2532 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
2533 // Process the weak references.
2534 MarkCompactWeakObjectRetainer mark_compact_object_retainer(
2535 non_atomic_marking_state());
2536 heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
2537 }
2538
2539 {
2540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
2541 // ClearFullMapTransitions must be called before weak references are
2542 // cleared.
2543 ClearFullMapTransitions();
2544 }
2545 {
2546 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2547 ClearWeakReferences();
2548 ClearWeakCollections();
2549 ClearJSWeakRefs();
2550 }
2551
2552 PROFILE(heap()->isolate(), WeakCodeClearEvent());
2553
2554 MarkDependentCodeForDeoptimization();
2555
2556 #ifdef V8_SANDBOXED_EXTERNAL_POINTERS
2557 {
2558 TRACE_GC(heap()->tracer(),
2559 GCTracer::Scope::MC_SWEEP_EXTERNAL_POINTER_TABLE);
2560 isolate()->external_pointer_table().Sweep(isolate());
2561 }
2562 #endif // V8_SANDBOXED_EXTERNAL_POINTERS
2563
2564 DCHECK(weak_objects_.transition_arrays.IsEmpty());
2565 DCHECK(weak_objects_.weak_references.IsEmpty());
2566 DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
2567 DCHECK(weak_objects_.js_weak_refs.IsEmpty());
2568 DCHECK(weak_objects_.weak_cells.IsEmpty());
2569 DCHECK(weak_objects_.code_flushing_candidates.IsEmpty());
2570 DCHECK(weak_objects_.baseline_flushing_candidates.IsEmpty());
2571 DCHECK(weak_objects_.flushed_js_functions.IsEmpty());
2572 }
2573
2574 void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
2575 std::pair<HeapObject, Code> weak_object_in_code;
2576 while (local_weak_objects()->weak_objects_in_code_local.Pop(
2577 &weak_object_in_code)) {
2578 HeapObject object = weak_object_in_code.first;
2579 Code code = weak_object_in_code.second;
2580 if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
2581 !code.embedded_objects_cleared()) {
2582 if (!code.marked_for_deoptimization()) {
2583 code.SetMarkedForDeoptimization("weak objects");
2584 have_code_to_deoptimize_ = true;
2585 }
2586 code.ClearEmbeddedObjects(heap_);
2587 DCHECK(code.embedded_objects_cleared());
2588 }
2589 }
2590 }
2591
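// A dead map may be the target of a simple transition from a still-live
// parent map. If the parent shares the dead target's descriptor array, it
// takes ownership of it by trimming it to the parent's own descriptors.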
2592 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
2593 DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
2594 Object potential_parent = dead_target.constructor_or_back_pointer();
2595 if (potential_parent.IsMap()) {
2596 Map parent = Map::cast(potential_parent);
2597 DisallowGarbageCollection no_gc_obviously;
2598 if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
2599 TransitionsAccessor(isolate(), parent)
2600 .HasSimpleTransitionTo(dead_target)) {
2601 ClearPotentialSimpleMapTransition(parent, dead_target);
2602 }
2603 }
2604 }
2605
2606 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
2607 Map dead_target) {
2608 DCHECK(!map.is_prototype_map());
2609 DCHECK(!dead_target.is_prototype_map());
2610 DCHECK_EQ(map.raw_transitions(), HeapObjectReference::Weak(dead_target));
2611 // Take ownership of the descriptor array.
2612 int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2613 DescriptorArray descriptors = map.instance_descriptors(isolate());
2614 if (descriptors == dead_target.instance_descriptors(isolate()) &&
2615 number_of_own_descriptors > 0) {
2616 TrimDescriptorArray(map, descriptors);
2617 DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2618 }
2619 }
2620
2621 void MarkCompactCollector::FlushBytecodeFromSFI(
2622 SharedFunctionInfo shared_info) {
2623 DCHECK(shared_info.HasBytecodeArray());
2624
2625 // Retain objects required for uncompiled data.
2626 String inferred_name = shared_info.inferred_name();
2627 int start_position = shared_info.StartPosition();
2628 int end_position = shared_info.EndPosition();
2629
2630 shared_info.DiscardCompiledMetadata(
2631 isolate(), [](HeapObject object, ObjectSlot slot, HeapObject target) {
2632 RecordSlot(object, slot, target);
2633 });
2634
2635 // The size of the bytecode array should always be at least as large as an
2636 // UncompiledData object.
2637 STATIC_ASSERT(BytecodeArray::SizeFor(0) >=
2638 UncompiledDataWithoutPreparseData::kSize);
2639
2640 // Replace bytecode array with an uncompiled data array.
2641 HeapObject compiled_data = shared_info.GetBytecodeArray(isolate());
2642 Address compiled_data_start = compiled_data.address();
2643 int compiled_data_size = compiled_data.Size();
2644 MemoryChunk* chunk = MemoryChunk::FromAddress(compiled_data_start);
2645
2646 // Clear any recorded slots for the compiled data as being invalid.
2647 RememberedSet<OLD_TO_NEW>::RemoveRange(
2648 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2649 SlotSet::FREE_EMPTY_BUCKETS);
2650 RememberedSet<OLD_TO_OLD>::RemoveRange(
2651 chunk, compiled_data_start, compiled_data_start + compiled_data_size,
2652 SlotSet::FREE_EMPTY_BUCKETS);
2653
2654 // Swap the map, using set_map_after_allocation to avoid verify heap checks
2655 // which are not necessary since we are doing this during the GC atomic pause.
2656 compiled_data.set_map_after_allocation(
2657 ReadOnlyRoots(heap()).uncompiled_data_without_preparse_data_map(),
2658 SKIP_WRITE_BARRIER);
2659
2660 // Create a filler object for any left over space in the bytecode array.
2661 if (!heap()->IsLargeObject(compiled_data)) {
2662 heap()->CreateFillerObjectAt(
2663 compiled_data.address() + UncompiledDataWithoutPreparseData::kSize,
2664 compiled_data_size - UncompiledDataWithoutPreparseData::kSize,
2665 ClearRecordedSlots::kNo);
2666 }
2667
2668 // Initialize the uncompiled data.
2669 UncompiledData uncompiled_data = UncompiledData::cast(compiled_data);
2670 uncompiled_data.InitAfterBytecodeFlush(
2671 inferred_name, start_position, end_position,
2672 [](HeapObject object, ObjectSlot slot, HeapObject target) {
2673 RecordSlot(object, slot, target);
2674 });
2675
2676 // Mark the uncompiled data as black, and ensure all fields have already been
2677 // marked.
2678 DCHECK(non_atomic_marking_state()->IsBlackOrGrey(inferred_name));
2679 non_atomic_marking_state()->WhiteToBlack(uncompiled_data);
2680
2681 // Use the raw function data setter to avoid validity checks, since we're
2682 // performing the unusual task of decompiling.
2683 shared_info.set_function_data(uncompiled_data, kReleaseStore);
2684 DCHECK(!shared_info.is_compiled());
2685 }
2686
2687 void MarkCompactCollector::ProcessOldCodeCandidates() {
2688 DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
2689 weak_objects_.code_flushing_candidates.IsEmpty());
2690 SharedFunctionInfo flushing_candidate;
2691 while (local_weak_objects()->code_flushing_candidates_local.Pop(
2692 &flushing_candidate)) {
2693 bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
2694 flushing_candidate.GetBytecodeArray(isolate()));
2695 if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
2696 CodeT baseline_codet =
2697 CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
2698 // Safe to do a relaxed load here since the CodeT was acquire-loaded.
2699 Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
2700 if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
2701 // Currently baseline code holds bytecode array strongly and it is
2702 // always ensured that bytecode is live if baseline code is live. Hence
2703 // baseline code can safely load bytecode array without any additional
2704 // checks. In future if this changes we need to update these checks to
2705 // flush code if the bytecode is not live and also update baseline code
2706 // to bailout if there is no bytecode.
2707 DCHECK(is_bytecode_live);
2708
2709 // Regardless of whether the CodeT is a CodeDataContainer or the Code
2710 // itself, if the Code is live then the CodeT has to be live and will
2711 // have been marked via the owning JSFunction.
2712 DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
2713 } else if (is_bytecode_live) {
2714 // If baseline code is flushed but we have a valid bytecode array reset
2715 // the function_data field to the BytecodeArray/InterpreterData.
2716 flushing_candidate.set_function_data(
2717 baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
2718 }
2719 }
2720
2721 if (!is_bytecode_live) {
2722 // If baseline code flushing is disabled we should only flush bytecode
2723 // from functions that don't have baseline data.
2724 DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
2725
2726 // If the BytecodeArray is dead, flush it, which will replace the field
2727 // with an uncompiled data object.
2728 FlushBytecodeFromSFI(flushing_candidate);
2729 }
2730
2731 // Now record the slot, which has either been updated to point to uncompiled
2732 // data, baseline code, or a BytecodeArray that is still alive.
2733 ObjectSlot slot =
2734 flushing_candidate.RawField(SharedFunctionInfo::kFunctionDataOffset);
2735 RecordSlot(flushing_candidate, slot, HeapObject::cast(*slot));
2736 }
2737 }
2738
2739 void MarkCompactCollector::ClearFlushedJsFunctions() {
2740 DCHECK(FLAG_flush_bytecode || weak_objects_.flushed_js_functions.IsEmpty());
2741 JSFunction flushed_js_function;
2742 while (local_weak_objects()->flushed_js_functions_local.Pop(
2743 &flushed_js_function)) {
2744 auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2745 Object target) {
2746 RecordSlot(object, slot, HeapObject::cast(target));
2747 };
2748 flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
2749 }
2750 }
2751
2752 void MarkCompactCollector::ProcessFlushedBaselineCandidates() {
2753 DCHECK(FLAG_flush_baseline_code ||
2754 weak_objects_.baseline_flushing_candidates.IsEmpty());
2755 JSFunction flushed_js_function;
2756 while (local_weak_objects()->baseline_flushing_candidates_local.Pop(
2757 &flushed_js_function)) {
2758 auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
2759 Object target) {
2760 RecordSlot(object, slot, HeapObject::cast(target));
2761 };
2762 flushed_js_function.ResetIfCodeFlushed(gc_notify_updated_slot);
2763
2764 // Record the code slot that has been updated either to CompileLazy,
2765 // InterpreterEntryTrampoline or baseline code.
2766 ObjectSlot slot = flushed_js_function.RawField(JSFunction::kCodeOffset);
2767 RecordSlot(flushed_js_function, slot, HeapObject::cast(*slot));
2768 }
2769 }
2770
2771 void MarkCompactCollector::ClearFullMapTransitions() {
2772 TransitionArray array;
2773 while (local_weak_objects()->transition_arrays_local.Pop(&array)) {
2774 int num_transitions = array.number_of_entries();
2775 if (num_transitions > 0) {
2776 Map map;
2777 // The array might contain "undefined" elements because it's not yet
2778 // filled. Allow it.
2779 if (array.GetTargetIfExists(0, isolate(), &map)) {
2780 DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
2781 Object constructor_or_back_pointer = map.constructor_or_back_pointer();
2782 if (constructor_or_back_pointer.IsSmi()) {
2783 DCHECK(isolate()->has_active_deserializer());
2784 DCHECK_EQ(constructor_or_back_pointer,
2785 Smi::uninitialized_deserialization_value());
2786 continue;
2787 }
2788 Map parent = Map::cast(map.constructor_or_back_pointer());
2789 bool parent_is_alive =
2790 non_atomic_marking_state()->IsBlackOrGrey(parent);
2791 DescriptorArray descriptors =
2792 parent_is_alive ? parent.instance_descriptors(isolate())
2793 : DescriptorArray();
2794 bool descriptors_owner_died =
2795 CompactTransitionArray(parent, array, descriptors);
2796 if (descriptors_owner_died) {
2797 TrimDescriptorArray(parent, descriptors);
2798 }
2799 }
2800 }
2801 }
2802 }
2803
2804 // Returns false if no maps have died, or if the transition array is
2805 // still being deserialized.
2806 bool MarkCompactCollector::TransitionArrayNeedsCompaction(
2807 TransitionArray transitions, int num_transitions) {
2808 for (int i = 0; i < num_transitions; ++i) {
2809 MaybeObject raw_target = transitions.GetRawTarget(i);
2810 if (raw_target.IsSmi()) {
2811 // This target is still being deserialized.
2812 DCHECK(isolate()->has_active_deserializer());
2813 DCHECK_EQ(raw_target.ToSmi(), Smi::uninitialized_deserialization_value());
2814 #ifdef DEBUG
2815 // Targets can only be dead if this array is fully deserialized.
2816 for (int j = 0; j < num_transitions; ++j) {
2817 DCHECK_IMPLIES(
2818 !transitions.GetRawTarget(j).IsSmi(),
2819 !non_atomic_marking_state()->IsWhite(transitions.GetTarget(j)));
2820 }
2821 #endif
2822 return false;
2823 } else if (non_atomic_marking_state()->IsWhite(
2824 TransitionsAccessor::GetTargetFromRaw(raw_target))) {
2825 #ifdef DEBUG
2826 // Targets can only be dead if this array is fully deserialized.
2827 for (int j = 0; j < num_transitions; ++j) {
2828 DCHECK(!transitions.GetRawTarget(j).IsSmi());
2829 }
2830 #endif
2831 return true;
2832 }
2833 }
2834 return false;
2835 }
2836
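// Compacts all transitions with live target maps to the front of the array,
// re-recording slots for the moved keys and targets, then right-trims the
// unused tail. Returns true if a dead target owned |descriptors|.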
2837 bool MarkCompactCollector::CompactTransitionArray(Map map,
2838 TransitionArray transitions,
2839 DescriptorArray descriptors) {
2840 DCHECK(!map.is_prototype_map());
2841 int num_transitions = transitions.number_of_entries();
2842 if (!TransitionArrayNeedsCompaction(transitions, num_transitions)) {
2843 return false;
2844 }
2845 bool descriptors_owner_died = false;
2846 int transition_index = 0;
2847 // Compact all live transitions to the left.
2848 for (int i = 0; i < num_transitions; ++i) {
2849 Map target = transitions.GetTarget(i);
2850 DCHECK_EQ(target.constructor_or_back_pointer(), map);
2851 if (non_atomic_marking_state()->IsWhite(target)) {
2852 if (!descriptors.is_null() &&
2853 target.instance_descriptors(isolate()) == descriptors) {
2854 DCHECK(!target.is_prototype_map());
2855 descriptors_owner_died = true;
2856 }
2857 } else {
2858 if (i != transition_index) {
2859 Name key = transitions.GetKey(i);
2860 transitions.SetKey(transition_index, key);
2861 HeapObjectSlot key_slot = transitions.GetKeySlot(transition_index);
2862 RecordSlot(transitions, key_slot, key);
2863 MaybeObject raw_target = transitions.GetRawTarget(i);
2864 transitions.SetRawTarget(transition_index, raw_target);
2865 HeapObjectSlot target_slot =
2866 transitions.GetTargetSlot(transition_index);
2867 RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
2868 }
2869 transition_index++;
2870 }
2871 }
2872 // If there are no transitions to be cleared, return.
2873 if (transition_index == num_transitions) {
2874 DCHECK(!descriptors_owner_died);
2875 return false;
2876 }
2877 // Note that we never eliminate a transition array, though we might right-trim
2878 // such that number_of_transitions() == 0. If this assumption changes,
2879 // TransitionArray::Insert() will need to deal with the case that a transition
2880 // array disappeared during GC.
2881 int trim = transitions.Capacity() - transition_index;
2882 if (trim > 0) {
2883 heap_->RightTrimWeakFixedArray(transitions,
2884 trim * TransitionArray::kEntrySize);
2885 transitions.SetNumberOfTransitions(transition_index);
2886 }
2887 return descriptors_owner_died;
2888 }
2889
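// Shrinks |array| by |descriptors_to_trim| entries: recorded slots covering
// the trimmed tail are removed and the freed memory is turned into a filler
// object.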
2890 void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray array,
2891 int descriptors_to_trim) {
2892 int old_nof_all_descriptors = array.number_of_all_descriptors();
2893 int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2894 DCHECK_LT(0, descriptors_to_trim);
2895 DCHECK_LE(0, new_nof_all_descriptors);
2896 Address start = array.GetDescriptorSlot(new_nof_all_descriptors).address();
2897 Address end = array.GetDescriptorSlot(old_nof_all_descriptors).address();
2898 MemoryChunk* chunk = MemoryChunk::FromHeapObject(array);
2899 RememberedSet<OLD_TO_NEW>::RemoveRange(chunk, start, end,
2900 SlotSet::FREE_EMPTY_BUCKETS);
2901 RememberedSet<OLD_TO_OLD>::RemoveRange(chunk, start, end,
2902 SlotSet::FREE_EMPTY_BUCKETS);
2903 heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2904 ClearRecordedSlots::kNo);
2905 array.set_number_of_all_descriptors(new_nof_all_descriptors);
2906 }
2907
2908 void MarkCompactCollector::TrimDescriptorArray(Map map,
2909 DescriptorArray descriptors) {
2910 int number_of_own_descriptors = map.NumberOfOwnDescriptors();
2911 if (number_of_own_descriptors == 0) {
2912 DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2913 return;
2914 }
2915 int to_trim =
2916 descriptors.number_of_all_descriptors() - number_of_own_descriptors;
2917 if (to_trim > 0) {
2918 descriptors.set_number_of_descriptors(number_of_own_descriptors);
2919 RightTrimDescriptorArray(descriptors, to_trim);
2920
2921 TrimEnumCache(map, descriptors);
2922 descriptors.Sort();
2923 }
2924 DCHECK(descriptors.number_of_descriptors() == number_of_own_descriptors);
2925 map.set_owns_descriptors(true);
2926 }
2927
2928 void MarkCompactCollector::TrimEnumCache(Map map, DescriptorArray descriptors) {
2929 int live_enum = map.EnumLength();
2930 if (live_enum == kInvalidEnumCacheSentinel) {
2931 live_enum = map.NumberOfEnumerableProperties();
2932 }
2933 if (live_enum == 0) return descriptors.ClearEnumCache();
2934 EnumCache enum_cache = descriptors.enum_cache();
2935
2936 FixedArray keys = enum_cache.keys();
2937 int to_trim = keys.length() - live_enum;
2938 if (to_trim <= 0) return;
2939 heap_->RightTrimFixedArray(keys, to_trim);
2940
2941 FixedArray indices = enum_cache.indices();
2942 to_trim = indices.length() - live_enum;
2943 if (to_trim <= 0) return;
2944 heap_->RightTrimFixedArray(indices, to_trim);
2945 }
2946
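// Removes entries from ephemeron hash tables whose keys are unmarked (dead),
// and drops dead tables from the ephemeron remembered set. Keys in the shared
// heap are skipped when this collector does not own the shared heap.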
2947 void MarkCompactCollector::ClearWeakCollections() {
2948 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2949 EphemeronHashTable table;
2950 while (local_weak_objects()->ephemeron_hash_tables_local.Pop(&table)) {
2951 for (InternalIndex i : table.IterateEntries()) {
2952 HeapObject key = HeapObject::cast(table.KeyAt(i));
2953 #ifdef VERIFY_HEAP
2954 if (FLAG_verify_heap) {
2955 Object value = table.ValueAt(i);
2956 if (value.IsHeapObject()) {
2957 HeapObject heap_object = HeapObject::cast(value);
2958 CHECK_IMPLIES(
2959 (!is_shared_heap_ && key.InSharedHeap()) ||
2960 non_atomic_marking_state()->IsBlackOrGrey(key),
2961 (!is_shared_heap_ && heap_object.InSharedHeap()) ||
2962 non_atomic_marking_state()->IsBlackOrGrey(heap_object));
2963 }
2964 }
2965 #endif
2966 if (!is_shared_heap_ && key.InSharedHeap()) continue;
2967 if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2968 table.RemoveEntry(i);
2969 }
2970 }
2971 }
2972 for (auto it = heap_->ephemeron_remembered_set_.begin();
2973 it != heap_->ephemeron_remembered_set_.end();) {
2974 if (!non_atomic_marking_state()->IsBlackOrGrey(it->first)) {
2975 it = heap_->ephemeron_remembered_set_.erase(it);
2976 } else {
2977 ++it;
2978 }
2979 }
2980 }
2981
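// Processes the weak reference worklist: slots pointing to live objects are
// re-recorded, while slots pointing to dead objects are overwritten with the
// cleared weak reference value (dead maps additionally clear a potential
// simple map transition).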
2982 void MarkCompactCollector::ClearWeakReferences() {
2983 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2984 std::pair<HeapObject, HeapObjectSlot> slot;
2985 HeapObjectReference cleared_weak_ref =
2986 HeapObjectReference::ClearedValue(isolate());
2987 while (local_weak_objects()->weak_references_local.Pop(&slot)) {
2988 HeapObject value;
2989 // The slot could have been overwritten, so we have to treat it
2990 // as MaybeObjectSlot.
2991 MaybeObjectSlot location(slot.second);
2992 if ((*location)->GetHeapObjectIfWeak(&value)) {
2993 DCHECK(!value.IsCell());
2994 if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2995 // The value of the weak reference is alive.
2996 RecordSlot(slot.first, HeapObjectSlot(location), value);
2997 } else {
2998 if (value.IsMap()) {
2999 // The map is non-live.
3000 ClearPotentialSimpleMapTransition(Map::cast(value));
3001 }
3002 location.store(cleared_weak_ref);
3003 }
3004 }
3005 }
3006 }
3007
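// Clears JSWeakRef targets and WeakCells whose targets died, enqueueing the
// owning JSFinalizationRegistry for cleanup and dropping entries keyed by dead
// unregister tokens. All pointer updates performed here are recorded manually
// because the regular write barrier is disabled during GC.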
3008 void MarkCompactCollector::ClearJSWeakRefs() {
3009 JSWeakRef weak_ref;
3010 while (local_weak_objects()->js_weak_refs_local.Pop(&weak_ref)) {
3011 HeapObject target = HeapObject::cast(weak_ref.target());
3012 if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
3013 weak_ref.set_target(ReadOnlyRoots(isolate()).undefined_value());
3014 } else {
3015 // The value of the JSWeakRef is alive.
3016 ObjectSlot slot = weak_ref.RawField(JSWeakRef::kTargetOffset);
3017 RecordSlot(weak_ref, slot, target);
3018 }
3019 }
3020 WeakCell weak_cell;
3021 while (local_weak_objects()->weak_cells_local.Pop(&weak_cell)) {
3022 auto gc_notify_updated_slot = [](HeapObject object, ObjectSlot slot,
3023 Object target) {
3024 if (target.IsHeapObject()) {
3025 RecordSlot(object, slot, HeapObject::cast(target));
3026 }
3027 };
3028 HeapObject target = HeapObject::cast(weak_cell.target());
3029 if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
3030 DCHECK(!target.IsUndefined());
3031 // The value of the WeakCell is dead.
3032 JSFinalizationRegistry finalization_registry =
3033 JSFinalizationRegistry::cast(weak_cell.finalization_registry());
3034 if (!finalization_registry.scheduled_for_cleanup()) {
3035 heap()->EnqueueDirtyJSFinalizationRegistry(finalization_registry,
3036 gc_notify_updated_slot);
3037 }
3038 // We're modifying the pointers in WeakCell and JSFinalizationRegistry
3039 // during GC; thus we need to record the slots it writes. The normal write
3040 // barrier is not enough, since it's disabled before GC.
3041 weak_cell.Nullify(isolate(), gc_notify_updated_slot);
3042 DCHECK(finalization_registry.NeedsCleanup());
3043 DCHECK(finalization_registry.scheduled_for_cleanup());
3044 } else {
3045 // The value of the WeakCell is alive.
3046 ObjectSlot slot = weak_cell.RawField(WeakCell::kTargetOffset);
3047 RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
3048 }
3049
3050 HeapObject unregister_token = weak_cell.unregister_token();
3051 if (!non_atomic_marking_state()->IsBlackOrGrey(unregister_token)) {
3052 // The unregister token is dead. Remove any corresponding entries in the
3053 // key map. Multiple WeakCells with the same token will have their
3054 // unregister_token fields set to undefined when processing the first
3055 // WeakCell. Like above, we're modifying pointers during GC, so record the
3056 // slots.
3057 JSFinalizationRegistry finalization_registry =
3058 JSFinalizationRegistry::cast(weak_cell.finalization_registry());
3059 finalization_registry.RemoveUnregisterToken(
3060 JSReceiver::cast(unregister_token), isolate(),
3061 JSFinalizationRegistry::kKeepMatchedCellsInRegistry,
3062 gc_notify_updated_slot);
3063 } else {
3064 // The unregister_token is alive.
3065 ObjectSlot slot = weak_cell.RawField(WeakCell::kUnregisterTokenOffset);
3066 RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
3067 }
3068 }
3069 heap()->PostFinalizationRegistryCleanupTaskIfNeeded();
3070 }
3071
3072 bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
3073 return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
3074 }
3075
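// A relocation slot is only worth recording if the target lives on an
// evacuation candidate and the host's page has not opted out of slot
// recording.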
3076 // static
3077 bool MarkCompactCollector::ShouldRecordRelocSlot(Code host, RelocInfo* rinfo,
3078 HeapObject target) {
3079 MemoryChunk* source_chunk = MemoryChunk::FromHeapObject(host);
3080 BasicMemoryChunk* target_chunk = BasicMemoryChunk::FromHeapObject(target);
3081 return target_chunk->IsEvacuationCandidate() &&
3082 !source_chunk->ShouldSkipEvacuationSlotRecording();
3083 }
3084
3085 // static
3086 MarkCompactCollector::RecordRelocSlotInfo
3087 MarkCompactCollector::ProcessRelocInfo(Code host, RelocInfo* rinfo,
3088 HeapObject target) {
3089 DCHECK_EQ(host, rinfo->host());
3090
3091 RecordRelocSlotInfo result;
3092 const RelocInfo::Mode rmode = rinfo->rmode();
3093 Address addr;
3094 SlotType slot_type;
3095
3096 if (rinfo->IsInConstantPool()) {
3097 addr = rinfo->constant_pool_entry_address();
3098
3099 if (RelocInfo::IsCodeTargetMode(rmode)) {
3100 slot_type = SlotType::kConstPoolCodeEntry;
3101 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
3102 slot_type = SlotType::kConstPoolEmbeddedObjectCompressed;
3103 } else {
3104 DCHECK(RelocInfo::IsFullEmbeddedObject(rmode));
3105 slot_type = SlotType::kConstPoolEmbeddedObjectFull;
3106 }
3107 } else {
3108 addr = rinfo->pc();
3109
3110 if (RelocInfo::IsCodeTargetMode(rmode)) {
3111 slot_type = SlotType::kCodeEntry;
3112 } else if (RelocInfo::IsFullEmbeddedObject(rmode)) {
3113 slot_type = SlotType::kEmbeddedObjectFull;
3114 } else if (RelocInfo::IsCompressedEmbeddedObject(rmode)) {
3115 slot_type = SlotType::kEmbeddedObjectCompressed;
3116 } else {
3117 DCHECK(RelocInfo::IsDataEmbeddedObject(rmode));
3118 slot_type = SlotType::kEmbeddedObjectData;
3119 }
3120 }
3121
3122 MemoryChunk* const source_chunk = MemoryChunk::FromHeapObject(host);
3123 const uintptr_t offset = addr - source_chunk->address();
3124 DCHECK_LT(offset, static_cast<uintptr_t>(TypedSlotSet::kMaxOffset));
3125 result.memory_chunk = source_chunk;
3126 result.slot_type = slot_type;
3127 result.offset = static_cast<uint32_t>(offset);
3128
3129 return result;
3130 }
3131
3132 // static
3133 void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
3134 HeapObject target) {
3135 if (!ShouldRecordRelocSlot(host, rinfo, target)) return;
3136 RecordRelocSlotInfo info = ProcessRelocInfo(host, rinfo, target);
3137
3138 // Access to TypedSlots needs to be protected, since LocalHeaps might
3139 // publish code in the background thread.
3140 base::Optional<base::MutexGuard> opt_guard;
3141 if (FLAG_concurrent_sparkplug) {
3142 opt_guard.emplace(info.memory_chunk->mutex());
3143 }
3144 RememberedSet<OLD_TO_OLD>::InsertTyped(info.memory_chunk, info.slot_type,
3145 info.offset);
3146 }
3147
3148 namespace {
3149
3150 // A missing specialization MakeSlotValue<FullObjectSlot, WEAK>() turns an
3151 // attempt to store a weak reference into a strong-only slot into a compilation error.
3152 template <typename TSlot, HeapObjectReferenceType reference_type>
3153 typename TSlot::TObject MakeSlotValue(HeapObject heap_object);
3154
3155 template <>
3156 Object MakeSlotValue<ObjectSlot, HeapObjectReferenceType::STRONG>(
3157 HeapObject heap_object) {
3158 return heap_object;
3159 }
3160
3161 template <>
3162 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::STRONG>(
3163 HeapObject heap_object) {
3164 return HeapObjectReference::Strong(heap_object);
3165 }
3166
3167 template <>
3168 MaybeObject MakeSlotValue<MaybeObjectSlot, HeapObjectReferenceType::WEAK>(
3169 HeapObject heap_object) {
3170 return HeapObjectReference::Weak(heap_object);
3171 }
3172
3173 template <>
3174 Object MakeSlotValue<OffHeapObjectSlot, HeapObjectReferenceType::STRONG>(
3175 HeapObject heap_object) {
3176 return heap_object;
3177 }
3178
3179 #ifdef V8_COMPRESS_POINTERS
3180 template <>
3181 Object MakeSlotValue<FullObjectSlot, HeapObjectReferenceType::STRONG>(
3182 HeapObject heap_object) {
3183 return heap_object;
3184 }
3185
3186 template <>
3187 MaybeObject MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::STRONG>(
3188 HeapObject heap_object) {
3189 return HeapObjectReference::Strong(heap_object);
3190 }
3191
3192 // The following specialization
3193 // MakeSlotValue<FullMaybeObjectSlot, HeapObjectReferenceType::WEAK>()
3194 // is not used.
3195 #endif
3196
3197 template <AccessMode access_mode, HeapObjectReferenceType reference_type,
3198 typename TSlot>
3199 static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
3200 TSlot slot,
3201 typename TSlot::TObject old,
3202 HeapObject heap_obj) {
3203 static_assert(std::is_same<TSlot, FullObjectSlot>::value ||
3204 std::is_same<TSlot, ObjectSlot>::value ||
3205 std::is_same<TSlot, FullMaybeObjectSlot>::value ||
3206 std::is_same<TSlot, MaybeObjectSlot>::value ||
3207 std::is_same<TSlot, OffHeapObjectSlot>::value,
3208 "Only [Full|OffHeap]ObjectSlot and [Full]MaybeObjectSlot are "
3209 "expected here");
3210 MapWord map_word = heap_obj.map_word(cage_base, kRelaxedLoad);
3211 if (map_word.IsForwardingAddress()) {
3212 DCHECK_IMPLIES(!Heap::InFromPage(heap_obj),
3213 MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
3214 Page::FromHeapObject(heap_obj)->IsFlagSet(
3215 Page::COMPACTION_WAS_ABORTED));
3216 PtrComprCageBase host_cage_base =
3217 V8_EXTERNAL_CODE_SPACE_BOOL ? GetPtrComprCageBase(heap_obj) : cage_base;
3218 typename TSlot::TObject target = MakeSlotValue<TSlot, reference_type>(
3219 map_word.ToForwardingAddress(host_cage_base));
3220 if (access_mode == AccessMode::NON_ATOMIC) {
3221 // Needs to be atomic for map space compaction: This slot could be a map
3222 // word which we update while loading the map word for updating the slot
3223 // on another page.
3224 slot.Relaxed_Store(target);
3225 } else {
3226 slot.Release_CompareAndSwap(old, target);
3227 }
3228 DCHECK(!Heap::InFromPage(target));
3229 DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
3230 } else {
3231 DCHECK(MarkCompactCollector::IsMapOrForwardedMap(map_word.ToMap()));
3232 }
3233 return REMOVE_SLOT;
3234 }
3235
3236 template <AccessMode access_mode, typename TSlot>
3237 static inline SlotCallbackResult UpdateSlot(PtrComprCageBase cage_base,
3238 TSlot slot) {
3239 typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
3240 HeapObject heap_obj;
3241 if (TSlot::kCanBeWeak && obj->GetHeapObjectIfWeak(&heap_obj)) {
3242 UpdateSlot<access_mode, HeapObjectReferenceType::WEAK>(cage_base, slot, obj,
3243 heap_obj);
3244 } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
3245 return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3246 cage_base, slot, obj, heap_obj);
3247 }
3248 return REMOVE_SLOT;
3249 }
3250
3251 template <AccessMode access_mode, typename TSlot>
3252 static inline SlotCallbackResult UpdateStrongSlot(PtrComprCageBase cage_base,
3253 TSlot slot) {
3254 typename TSlot::TObject obj = slot.Relaxed_Load(cage_base);
3255 DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
3256 HeapObject heap_obj;
3257 if (obj.GetHeapObject(&heap_obj)) {
3258 return UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3259 cage_base, slot, obj, heap_obj);
3260 }
3261 return REMOVE_SLOT;
3262 }
3263
3264 template <AccessMode access_mode>
3265 static inline SlotCallbackResult UpdateStrongCodeSlot(
3266 HeapObject host, PtrComprCageBase cage_base,
3267 PtrComprCageBase code_cage_base, CodeObjectSlot slot) {
3268 Object obj = slot.Relaxed_Load(code_cage_base);
3269 DCHECK(!HAS_WEAK_HEAP_OBJECT_TAG(obj.ptr()));
3270 HeapObject heap_obj;
3271 if (obj.GetHeapObject(&heap_obj)) {
3272 SlotCallbackResult result =
3273 UpdateSlot<access_mode, HeapObjectReferenceType::STRONG>(
3274 cage_base, slot, obj, heap_obj);
3275
3276 CodeDataContainer code_data_container =
3277 CodeDataContainer::cast(HeapObject::FromAddress(
3278 slot.address() - CodeDataContainer::kCodeOffset));
3279 Code code = code_data_container.code(code_cage_base);
3280 Isolate* isolate_for_sandbox = GetIsolateForSandbox(host);
3281 code_data_container.UpdateCodeEntryPoint(isolate_for_sandbox, code);
3282 return result;
3283 }
3284 return REMOVE_SLOT;
3285 }
3286
3287 } // namespace
3288
3289 // Visitor for updating root pointers and to-space pointers.
3290 // It does not expect to encounter pointers to dead objects.
3291 class PointersUpdatingVisitor final : public ObjectVisitorWithCageBases,
3292 public RootVisitor {
3293 public:
3294 explicit PointersUpdatingVisitor(Heap* heap)
3295 : ObjectVisitorWithCageBases(heap) {}
3296
3297 void VisitPointer(HeapObject host, ObjectSlot p) override {
3298 UpdateStrongSlotInternal(cage_base(), p);
3299 }
3300
3301 void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
3302 UpdateSlotInternal(cage_base(), p);
3303 }
3304
3305 void VisitPointers(HeapObject host, ObjectSlot start,
3306 ObjectSlot end) override {
3307 for (ObjectSlot p = start; p < end; ++p) {
3308 UpdateStrongSlotInternal(cage_base(), p);
3309 }
3310 }
3311
3312 void VisitPointers(HeapObject host, MaybeObjectSlot start,
3313 MaybeObjectSlot end) final {
3314 for (MaybeObjectSlot p = start; p < end; ++p) {
3315 UpdateSlotInternal(cage_base(), p);
3316 }
3317 }
3318
3319 void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3320 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
3321 UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(host, cage_base(),
3322 code_cage_base(), slot);
3323 }
3324
3325 void VisitRootPointer(Root root, const char* description,
3326 FullObjectSlot p) override {
3327 DCHECK(!MapWord::IsPacked(p.Relaxed_Load().ptr()));
3328 UpdateRootSlotInternal(cage_base(), p);
3329 }
3330
3331 void VisitRootPointers(Root root, const char* description,
3332 FullObjectSlot start, FullObjectSlot end) override {
3333 for (FullObjectSlot p = start; p < end; ++p) {
3334 UpdateRootSlotInternal(cage_base(), p);
3335 }
3336 }
3337
3338 void VisitRootPointers(Root root, const char* description,
3339 OffHeapObjectSlot start,
3340 OffHeapObjectSlot end) override {
3341 for (OffHeapObjectSlot p = start; p < end; ++p) {
3342 UpdateRootSlotInternal(cage_base(), p);
3343 }
3344 }
3345
3346 void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
3347 // This visitor never visits code objects.
3348 UNREACHABLE();
3349 }
3350
3351 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
3352 // This visitor never visits code objects.
3353 UNREACHABLE();
3354 }
3355
3356 private:
3357 static inline SlotCallbackResult UpdateRootSlotInternal(
3358 PtrComprCageBase cage_base, FullObjectSlot slot) {
3359 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3360 }
3361
3362 static inline SlotCallbackResult UpdateRootSlotInternal(
3363 PtrComprCageBase cage_base, OffHeapObjectSlot slot) {
3364 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3365 }
3366
3367 static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
3368 PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3369 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3370 }
3371
3372 static inline SlotCallbackResult UpdateStrongSlotInternal(
3373 PtrComprCageBase cage_base, ObjectSlot slot) {
3374 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3375 }
3376
3377 static inline SlotCallbackResult UpdateSlotInternal(
3378 PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3379 return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
3380 }
3381 };
3382
3383 #ifdef VERIFY_HEAP
3384 // Visitor for verifying client-heap slots: referenced objects in the shared
3385 // heap must not have been moved (no forwarding addresses).
3386 class ClientHeapVerifier final : public ObjectVisitorWithCageBases {
3387 public:
3388 explicit ClientHeapVerifier(Heap* heap) : ObjectVisitorWithCageBases(heap) {}
3389
3390 void VisitPointer(HeapObject host, ObjectSlot p) override {
3391 VerifySlot(cage_base(), p);
3392 }
3393
3394 void VisitPointer(HeapObject host, MaybeObjectSlot p) override {
3395 VerifySlot(cage_base(), p);
3396 }
3397
3398 void VisitPointers(HeapObject host, ObjectSlot start,
3399 ObjectSlot end) override {
3400 for (ObjectSlot p = start; p < end; ++p) {
3401 VerifySlot(cage_base(), p);
3402 }
3403 }
3404
3405 void VisitPointers(HeapObject host, MaybeObjectSlot start,
3406 MaybeObjectSlot end) final {
3407 for (MaybeObjectSlot p = start; p < end; ++p) {
3408 VerifySlot(cage_base(), p);
3409 }
3410 }
3411
3412 void VisitMapPointer(HeapObject host) override {
3413 VerifySlot(cage_base(), host.map_slot());
3414 }
3415
3416 void VisitCodePointer(HeapObject host, CodeObjectSlot slot) override {
3417 VerifySlot(code_cage_base(), ObjectSlot(slot.address()));
3418 }
3419
3420 void VisitCodeTarget(Code host, RelocInfo* rinfo) override {}
3421
3422 void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {}
3423
3424 private:
3425 void VerifySlot(PtrComprCageBase cage_base, ObjectSlot slot) {
3426 HeapObject heap_object;
3427 if (slot.load(cage_base).GetHeapObject(&heap_object)) {
3428 VerifyHeapObject(heap_object);
3429 }
3430 }
3431
3432 void VerifySlot(PtrComprCageBase cage_base, MaybeObjectSlot slot) {
3433 HeapObject heap_object;
3434 if (slot.load(cage_base).GetHeapObject(&heap_object)) {
3435 VerifyHeapObject(heap_object);
3436 }
3437 }
3438
3439 void VerifyHeapObject(HeapObject heap_object) {
3440 if (BasicMemoryChunk::FromHeapObject(heap_object)->InReadOnlySpace())
3441 return;
3442 if (!heap_object.InSharedHeap()) return;
3443 CHECK(!heap_object.map_word(kRelaxedLoad).IsForwardingAddress());
3444 }
3445 };
3446 #endif // VERIFY_HEAP
3447
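// Follows the forwarding address of an evacuated external-string-table entry
// and, for external strings, moves the external backing-store byte accounting
// from the old page to the new one.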
3448 static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
3449 FullObjectSlot p) {
3450 HeapObject old_string = HeapObject::cast(*p);
3451 MapWord map_word = old_string.map_word(kRelaxedLoad);
3452
3453 if (map_word.IsForwardingAddress()) {
3454 String new_string = String::cast(map_word.ToForwardingAddress());
3455
3456 if (new_string.IsExternalString()) {
3457 MemoryChunk::MoveExternalBackingStoreBytes(
3458 ExternalBackingStoreType::kExternalString,
3459 Page::FromAddress((*p).ptr()), Page::FromHeapObject(new_string),
3460 ExternalString::cast(new_string).ExternalPayloadSize());
3461 }
3462 return new_string;
3463 }
3464
3465 return String::cast(*p);
3466 }
3467
3468 void MarkCompactCollector::EvacuatePrologue() {
3469 // New space.
3470 NewSpace* new_space = heap()->new_space();
3471
3472 if (new_space) {
3473 // Append the list of new space pages to be processed.
3474 for (Page* p :
3475 PageRange(new_space->first_allocatable_address(), new_space->top())) {
3476 new_space_evacuation_pages_.push_back(p);
3477 }
3478 new_space->Flip();
3479 new_space->ResetLinearAllocationArea();
3480
3481 DCHECK_EQ(new_space->Size(), 0);
3482 }
3483
3484 if (heap()->new_lo_space()) {
3485 heap()->new_lo_space()->Flip();
3486 heap()->new_lo_space()->ResetPendingObject();
3487 }
3488
3489 // Old space.
3490 DCHECK(old_space_evacuation_pages_.empty());
3491 old_space_evacuation_pages_ = std::move(evacuation_candidates_);
3492 evacuation_candidates_.clear();
3493 DCHECK(evacuation_candidates_.empty());
3494 }
3495
3496 void MarkCompactCollector::EvacuateEpilogue() {
3497 aborted_evacuation_candidates_due_to_oom_.clear();
3498 aborted_evacuation_candidates_due_to_flags_.clear();
3499
3500 // New space.
3501 if (heap()->new_space()) {
3502 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3503 DCHECK_EQ(0, heap()->new_space()->Size());
3504 }
3505
3506 // Deallocate unmarked large objects.
3507 heap()->lo_space()->FreeUnmarkedObjects();
3508 heap()->code_lo_space()->FreeUnmarkedObjects();
3509 if (heap()->new_lo_space()) {
3510 heap()->new_lo_space()->FreeUnmarkedObjects();
3511 }
3512
3513 // Old space. Deallocate evacuated candidate pages.
3514 ReleaseEvacuationCandidates();
3515
3516 // Give pages that are queued to be freed back to the OS.
3517 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
3518
3519 #ifdef DEBUG
3520 MemoryChunkIterator chunk_iterator(heap());
3521
3522 while (chunk_iterator.HasNext()) {
3523 MemoryChunk* chunk = chunk_iterator.Next();
3524
3525 // Old-to-old slot sets must be empty after evacuation.
3526 DCHECK_NULL((chunk->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3527 DCHECK_NULL((chunk->slot_set<OLD_TO_SHARED, AccessMode::NON_ATOMIC>()));
3528 DCHECK_NULL((chunk->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
3529 DCHECK_NULL(chunk->invalidated_slots<OLD_TO_OLD>());
3530 DCHECK_NULL(chunk->invalidated_slots<OLD_TO_NEW>());
3531 }
3532 #endif
3533 }
3534
3535 namespace {
3536 ConcurrentAllocator* CreateSharedOldAllocator(Heap* heap) {
3537 if (FLAG_shared_string_table && heap->isolate()->shared_isolate()) {
3538 return new ConcurrentAllocator(nullptr, heap->shared_old_space());
3539 }
3540
3541 return nullptr;
3542 }
3543 } // namespace
3544
3545 class Evacuator : public Malloced {
3546 public:
3547 enum EvacuationMode {
3548 kObjectsNewToOld,
3549 kPageNewToOld,
3550 kObjectsOldToOld,
3551 kPageNewToNew,
3552 };
3553
3554 static const char* EvacuationModeName(EvacuationMode mode) {
3555 switch (mode) {
3556 case kObjectsNewToOld:
3557 return "objects-new-to-old";
3558 case kPageNewToOld:
3559 return "page-new-to-old";
3560 case kObjectsOldToOld:
3561 return "objects-old-to-old";
3562 case kPageNewToNew:
3563 return "page-new-to-new";
3564 }
3565 }
3566
3567 static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
3568 // Note: The order of checks is important in this function.
3569 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
3570 return kPageNewToOld;
3571 if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
3572 return kPageNewToNew;
3573 if (chunk->InYoungGeneration()) return kObjectsNewToOld;
3574 return kObjectsOldToOld;
3575 }
3576
3577 // NewSpacePages with more live bytes than this threshold qualify for fast
3578 // evacuation.
3579 static intptr_t NewSpacePageEvacuationThreshold() {
3580 if (FLAG_page_promotion)
3581 return FLAG_page_promotion_threshold *
3582 MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
3583 return MemoryChunkLayout::AllocatableMemoryInDataPage() + kTaggedSize;
3584 }
3585
3586 Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor,
3587 EvacuationAllocator* local_allocator,
3588 AlwaysPromoteYoung always_promote_young)
3589 : heap_(heap),
3590 local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
3591 shared_old_allocator_(CreateSharedOldAllocator(heap_)),
3592 new_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
3593 record_visitor, &local_pretenuring_feedback_,
3594 always_promote_young),
3595 new_to_new_page_visitor_(heap_, record_visitor,
3596 &local_pretenuring_feedback_),
3597 new_to_old_page_visitor_(heap_, record_visitor,
3598 &local_pretenuring_feedback_),
3599
3600 old_space_visitor_(heap_, local_allocator, shared_old_allocator_.get(),
3601 record_visitor),
3602 local_allocator_(local_allocator),
3603 duration_(0.0),
3604 bytes_compacted_(0) {}
3605
3606 virtual ~Evacuator() = default;
3607
3608 void EvacuatePage(MemoryChunk* chunk);
3609
3610 void AddObserver(MigrationObserver* observer) {
3611 new_space_visitor_.AddObserver(observer);
3612 old_space_visitor_.AddObserver(observer);
3613 }
3614
3615 // Merge back locally cached info sequentially. Note that this method needs
3616 // to be called from the main thread.
3617 virtual void Finalize();
3618
3619 virtual GCTracer::Scope::ScopeId GetBackgroundTracingScope() = 0;
3620 virtual GCTracer::Scope::ScopeId GetTracingScope() = 0;
3621
3622 protected:
3623 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3624
3625 // |saved_live_bytes| returns the live bytes of the page that was processed.
3626 virtual void RawEvacuatePage(MemoryChunk* chunk,
3627 intptr_t* saved_live_bytes) = 0;
3628
3629 inline Heap* heap() { return heap_; }
3630
3631 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3632 duration_ += duration;
3633 bytes_compacted_ += bytes_compacted;
3634 }
3635
3636 Heap* heap_;
3637
3638 Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
3639
3640 // Allocator for the shared heap.
3641 std::unique_ptr<ConcurrentAllocator> shared_old_allocator_;
3642
3643 // Visitors for the corresponding spaces.
3644 EvacuateNewSpaceVisitor new_space_visitor_;
3645 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
3646 new_to_new_page_visitor_;
3647 EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
3648 new_to_old_page_visitor_;
3649 EvacuateOldSpaceVisitor old_space_visitor_;
3650
3651 // Locally cached collector data.
3652 EvacuationAllocator* local_allocator_;
3653
3654 // Book keeping info.
3655 double duration_;
3656 intptr_t bytes_compacted_;
3657 };
3658
3659 void Evacuator::EvacuatePage(MemoryChunk* chunk) {
3660 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
3661 DCHECK(chunk->SweepingDone());
3662 intptr_t saved_live_bytes = 0;
3663 double evacuation_time = 0.0;
3664 {
3665 AlwaysAllocateScope always_allocate(heap());
3666 TimedScope timed_scope(&evacuation_time);
3667 RawEvacuatePage(chunk, &saved_live_bytes);
3668 }
3669 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3670 if (FLAG_trace_evacuation) {
3671 PrintIsolate(heap()->isolate(),
3672 "evacuation[%p]: page=%p new_space=%d "
3673 "page_evacuation=%d executable=%d contains_age_mark=%d "
3674 "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
3675 static_cast<void*>(this), static_cast<void*>(chunk),
3676 chunk->InNewSpace(),
3677 chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
3678 chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
3679 chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
3680 chunk->Contains(heap()->new_space()->age_mark()),
3681 saved_live_bytes, evacuation_time,
3682 chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
3683 }
3684 }
3685
3686 void Evacuator::Finalize() {
3687 local_allocator_->Finalize();
3688 if (shared_old_allocator_) shared_old_allocator_->FreeLinearAllocationArea();
3689 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3690 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
3691 new_to_old_page_visitor_.moved_bytes());
3692 heap()->IncrementSemiSpaceCopiedObjectSize(
3693 new_space_visitor_.semispace_copied_size() +
3694 new_to_new_page_visitor_.moved_bytes());
3695 heap()->IncrementYoungSurvivorsCounter(
3696 new_space_visitor_.promoted_size() +
3697 new_space_visitor_.semispace_copied_size() +
3698 new_to_old_page_visitor_.moved_bytes() +
3699 new_to_new_page_visitor_.moved_bytes());
3700 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3701 }
3702
3703 class FullEvacuator : public Evacuator {
3704 public:
3705 explicit FullEvacuator(MarkCompactCollector* collector)
3706 : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
3707 AlwaysPromoteYoung::kYes),
3708 record_visitor_(collector, &ephemeron_remembered_set_),
3709 local_allocator_(heap_,
3710 CompactionSpaceKind::kCompactionSpaceForMarkCompact),
3711 collector_(collector) {}
3712
3713 GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
3714 return GCTracer::Scope::MC_BACKGROUND_EVACUATE_COPY;
3715 }
3716
3717 GCTracer::Scope::ScopeId GetTracingScope() override {
3718 return GCTracer::Scope::MC_EVACUATE_COPY_PARALLEL;
3719 }
3720
3721 void Finalize() override {
3722 Evacuator::Finalize();
3723
3724 for (auto it = ephemeron_remembered_set_.begin();
3725 it != ephemeron_remembered_set_.end(); ++it) {
3726 auto insert_result =
3727 heap()->ephemeron_remembered_set_.insert({it->first, it->second});
3728 if (!insert_result.second) {
3729 // Insertion didn't happen, there was already an item.
3730 auto& set = insert_result.first->second;  // Merge into the existing set.
3731 for (int entry : it->second) {
3732 set.insert(entry);
3733 }
3734 }
3735 }
3736 }
3737
3738 protected:
3739 void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
3740 EphemeronRememberedSet ephemeron_remembered_set_;
3741 RecordMigratedSlotVisitor record_visitor_;
3742 EvacuationAllocator local_allocator_;
3743
3744 MarkCompactCollector* collector_;
3745 };
3746
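// Evacuates a single chunk according to its evacuation mode: new-space objects
// are copied or promoted individually, pages promoted wholesale keep their
// objects in place and only have their live objects revisited for slot
// recording, and old-space candidates are compacted. An aborted old-space page
// is reported back to the collector for later processing on the main thread.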
3747 void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
3748 const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
3749 MarkCompactCollector::NonAtomicMarkingState* marking_state =
3750 collector_->non_atomic_marking_state();
3751 *live_bytes = marking_state->live_bytes(chunk);
3752 TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3753 "FullEvacuator::RawEvacuatePage", "evacuation_mode",
3754 EvacuationModeName(evacuation_mode), "live_bytes", *live_bytes);
3755 HeapObject failed_object;
3756 switch (evacuation_mode) {
3757 case kObjectsNewToOld:
3758 LiveObjectVisitor::VisitBlackObjectsNoFail(
3759 chunk, marking_state, &new_space_visitor_,
3760 LiveObjectVisitor::kClearMarkbits);
3761 break;
3762 case kPageNewToOld:
3763 LiveObjectVisitor::VisitBlackObjectsNoFail(
3764 chunk, marking_state, &new_to_old_page_visitor_,
3765 LiveObjectVisitor::kKeepMarking);
3766 new_to_old_page_visitor_.account_moved_bytes(
3767 marking_state->live_bytes(chunk));
3768 break;
3769 case kPageNewToNew:
3770 LiveObjectVisitor::VisitBlackObjectsNoFail(
3771 chunk, marking_state, &new_to_new_page_visitor_,
3772 LiveObjectVisitor::kKeepMarking);
3773 new_to_new_page_visitor_.account_moved_bytes(
3774 marking_state->live_bytes(chunk));
3775 break;
3776 case kObjectsOldToOld: {
3777 const bool success = LiveObjectVisitor::VisitBlackObjects(
3778 chunk, marking_state, &old_space_visitor_,
3779 LiveObjectVisitor::kClearMarkbits, &failed_object);
3780 if (!success) {
3781 if (FLAG_crash_on_aborted_evacuation) {
3782 heap_->FatalProcessOutOfMemory("FullEvacuator::RawEvacuatePage");
3783 } else {
3784 // Aborted compaction page. Actual processing happens on the main
3785 // thread for simplicity reasons.
3786 collector_->ReportAbortedEvacuationCandidateDueToOOM(
3787 failed_object.address(), static_cast<Page*>(chunk));
3788 }
3789 }
3790 break;
3791 }
3792 }
3793 }
3794
3795 class PageEvacuationJob : public v8::JobTask {
3796 public:
3797 PageEvacuationJob(
3798 Isolate* isolate, std::vector<std::unique_ptr<Evacuator>>* evacuators,
3799 std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items)
3800 : evacuators_(evacuators),
3801 evacuation_items_(std::move(evacuation_items)),
3802 remaining_evacuation_items_(evacuation_items_.size()),
3803 generator_(evacuation_items_.size()),
3804 tracer_(isolate->heap()->tracer()) {}
3805
3806 void Run(JobDelegate* delegate) override {
3807 Evacuator* evacuator = (*evacuators_)[delegate->GetTaskId()].get();
3808 if (delegate->IsJoiningThread()) {
3809 TRACE_GC(tracer_, evacuator->GetTracingScope());
3810 ProcessItems(delegate, evacuator);
3811 } else {
3812 TRACE_GC_EPOCH(tracer_, evacuator->GetBackgroundTracingScope(),
3813 ThreadKind::kBackground);
3814 ProcessItems(delegate, evacuator);
3815 }
3816 }
3817
3818 void ProcessItems(JobDelegate* delegate, Evacuator* evacuator) {
3819 while (remaining_evacuation_items_.load(std::memory_order_relaxed) > 0) {
3820 base::Optional<size_t> index = generator_.GetNext();
3821 if (!index) return;
3822 for (size_t i = *index; i < evacuation_items_.size(); ++i) {
3823 auto& work_item = evacuation_items_[i];
3824 if (!work_item.first.TryAcquire()) break;
3825 evacuator->EvacuatePage(work_item.second);
3826 if (remaining_evacuation_items_.fetch_sub(
3827 1, std::memory_order_relaxed) <= 1) {
3828 return;
3829 }
3830 }
3831 }
3832 }
3833
3834 size_t GetMaxConcurrency(size_t worker_count) const override {
3835 const size_t kItemsPerWorker = std::max(1, MB / Page::kPageSize);
3836 // Ceiling division to ensure enough workers for all
3837 // |remaining_evacuation_items_|
3838 const size_t wanted_num_workers =
3839 (remaining_evacuation_items_.load(std::memory_order_relaxed) +
3840 kItemsPerWorker - 1) /
3841 kItemsPerWorker;
3842 return std::min<size_t>(wanted_num_workers, evacuators_->size());
3843 }
3844
3845 private:
3846 std::vector<std::unique_ptr<Evacuator>>* evacuators_;
3847 std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items_;
3848 std::atomic<size_t> remaining_evacuation_items_{0};
3849 IndexGenerator generator_;
3850
3851 GCTracer* tracer_;
3852 };
3853
3854 template <class Evacuator, class Collector>
3855 size_t MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
3856 Collector* collector,
3857 std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items,
3858 MigrationObserver* migration_observer) {
3859 base::Optional<ProfilingMigrationObserver> profiling_observer;
3860 if (isolate()->LogObjectRelocation()) {
3861 profiling_observer.emplace(heap());
3862 }
3863 std::vector<std::unique_ptr<v8::internal::Evacuator>> evacuators;
3864 const int wanted_num_tasks = NumberOfParallelCompactionTasks();
3865 for (int i = 0; i < wanted_num_tasks; i++) {
3866 auto evacuator = std::make_unique<Evacuator>(collector);
3867 if (profiling_observer) {
3868 evacuator->AddObserver(&profiling_observer.value());
3869 }
3870 if (migration_observer) {
3871 evacuator->AddObserver(migration_observer);
3872 }
3873 evacuators.push_back(std::move(evacuator));
3874 }
3875 V8::GetCurrentPlatform()
3876 ->PostJob(v8::TaskPriority::kUserBlocking,
3877 std::make_unique<PageEvacuationJob>(
3878 isolate(), &evacuators, std::move(evacuation_items)))
3879 ->Join();
3880 for (auto& evacuator : evacuators) {
3881 evacuator->Finalize();
3882 }
3883 return wanted_num_tasks;
3884 }
3885
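// A new-space page is moved in its entirety only if we are not trying to
// reduce memory, the page may be evacuated at all, its live bytes exceed the
// promotion threshold, it does not contain the age mark (unless young objects
// are always promoted), and the old generation can absorb it.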
3886 bool MarkCompactCollectorBase::ShouldMovePage(
3887 Page* p, intptr_t live_bytes, AlwaysPromoteYoung always_promote_young) {
3888 const bool reduce_memory = heap()->ShouldReduceMemory();
3889 const Address age_mark = heap()->new_space()->age_mark();
3890 return !reduce_memory && !p->NeverEvacuate() &&
3891 (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
3892 (always_promote_young == AlwaysPromoteYoung::kYes ||
3893 !p->Contains(age_mark)) &&
3894 heap()->CanExpandOldGeneration(live_bytes);
3895 }
3896
3897 namespace {
3898
3899 void TraceEvacuation(Isolate* isolate, size_t pages_count,
3900 size_t wanted_num_tasks, size_t live_bytes,
3901 size_t aborted_pages) {
3902 DCHECK(FLAG_trace_evacuation);
3903 PrintIsolate(
3904 isolate,
3905 "%8.0f ms: evacuation-summary: parallel=%s pages=%zu "
3906 "wanted_tasks=%zu cores=%d live_bytes=%" V8PRIdPTR
3907 " compaction_speed=%.f aborted=%zu\n",
3908 isolate->time_millis_since_init(),
3909 FLAG_parallel_compaction ? "yes" : "no", pages_count, wanted_num_tasks,
3910 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1, live_bytes,
3911 isolate->heap()->tracer()->CompactionSpeedInBytesPerMillisecond(),
3912 aborted_pages);
3913 }
3914
3915 } // namespace
3916
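// Collects the work items for this cycle (new-space pages, old-space
// evacuation candidates that were not aborted, and promoted young-generation
// large objects) and processes them with parallel FullEvacuator tasks.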
3917 void MarkCompactCollector::EvacuatePagesInParallel() {
3918 std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
3919 intptr_t live_bytes = 0;
3920
3921 // Evacuation of new space pages cannot be aborted, so it needs to run
3922 // before old space evacuation.
3923 for (Page* page : new_space_evacuation_pages_) {
3924 intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
3925 if (live_bytes_on_page == 0) continue;
3926 live_bytes += live_bytes_on_page;
3927 if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
3928 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3929 DCHECK_EQ(heap()->old_space(), page->owner());
3930 // The move added page->allocated_bytes to the old space, but we are
3931 // going to sweep the page and add page->live_byte_count.
3932 heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
3933 page);
3934 }
3935 evacuation_items.emplace_back(ParallelWorkItem{}, page);
3936 }
3937
3938 if (!heap()->IsGCWithoutStack()) {
3939 if (!FLAG_compact_with_stack || !FLAG_compact_code_space_with_stack) {
3940 for (Page* page : old_space_evacuation_pages_) {
3941 if (!FLAG_compact_with_stack || page->owner_identity() == CODE_SPACE) {
3942 ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
3943 // Set this flag early on in this case to allow filtering such pages
3944 // below.
3945 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3946 }
3947 }
3948 }
3949 }
3950
3951 for (Page* page : old_space_evacuation_pages_) {
3952 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
3953
3954 live_bytes += non_atomic_marking_state()->live_bytes(page);
3955 evacuation_items.emplace_back(ParallelWorkItem{}, page);
3956 }
3957
3958 // Promote young generation large objects.
3959 if (heap()->new_lo_space()) {
3960 IncrementalMarking::NonAtomicMarkingState* marking_state =
3961 heap()->incremental_marking()->non_atomic_marking_state();
3962
3963 for (auto it = heap()->new_lo_space()->begin();
3964 it != heap()->new_lo_space()->end();) {
3965 LargePage* current = *it;
3966 it++;
3967 HeapObject object = current->GetObject();
3968 DCHECK(!marking_state->IsGrey(object));
3969 if (marking_state->IsBlack(object)) {
3970 heap_->lo_space()->PromoteNewLargeObject(current);
3971 current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
3972 promoted_large_pages_.push_back(current);
3973 evacuation_items.emplace_back(ParallelWorkItem{}, current);
3974 }
3975 }
3976 }
3977
3978 const size_t pages_count = evacuation_items.size();
3979 size_t wanted_num_tasks = 0;
3980 if (!evacuation_items.empty()) {
3981 TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3982 "MarkCompactCollector::EvacuatePagesInParallel", "pages",
3983 evacuation_items.size());
3984
3985 wanted_num_tasks = CreateAndExecuteEvacuationTasks<FullEvacuator>(
3986 this, std::move(evacuation_items), nullptr);
3987 }
3988
3989 const size_t aborted_pages = PostProcessEvacuationCandidates();
3990
3991 if (FLAG_trace_evacuation) {
3992 TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes,
3993 aborted_pages);
3994 }
3995 }
3996
3997 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3998 public:
3999 Object RetainAs(Object object) override {
4000 if (object.IsHeapObject()) {
4001 HeapObject heap_object = HeapObject::cast(object);
4002 MapWord map_word = heap_object.map_word(kRelaxedLoad);
4003 if (map_word.IsForwardingAddress()) {
4004 return map_word.ToForwardingAddress();
4005 }
4006 }
4007 return object;
4008 }
4009 };
4010
4011 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
4012 EvacuateRecordOnlyVisitor visitor(heap());
4013 LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
4014 &visitor,
4015 LiveObjectVisitor::kKeepMarking);
4016 }
4017
4018 template <class Visitor, typename MarkingState>
4019 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
4020 MarkingState* marking_state,
4021 Visitor* visitor,
4022 IterationMode iteration_mode,
4023 HeapObject* failed_object) {
4024 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4025 "LiveObjectVisitor::VisitBlackObjects");
4026 for (auto object_and_size :
4027 LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
4028 HeapObject const object = object_and_size.first;
4029 if (!visitor->Visit(object, object_and_size.second)) {
4030 if (iteration_mode == kClearMarkbits) {
4031 marking_state->bitmap(chunk)->ClearRange(
4032 chunk->AddressToMarkbitIndex(chunk->area_start()),
4033 chunk->AddressToMarkbitIndex(object.address()));
4034 *failed_object = object;
4035 }
4036 return false;
4037 }
4038 }
4039 if (iteration_mode == kClearMarkbits) {
4040 marking_state->ClearLiveness(chunk);
4041 }
4042 return true;
4043 }
4044
4045 template <class Visitor, typename MarkingState>
4046 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
4047 MarkingState* marking_state,
4048 Visitor* visitor,
4049 IterationMode iteration_mode) {
4050 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4051 "LiveObjectVisitor::VisitBlackObjectsNoFail");
4052 if (chunk->IsLargePage()) {
4053 HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
4054 if (marking_state->IsBlack(object)) {
4055 const bool success = visitor->Visit(object, object.Size());
4056 USE(success);
4057 DCHECK(success);
4058 }
4059 } else {
4060 for (auto object_and_size :
4061 LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
4062 HeapObject const object = object_and_size.first;
4063 DCHECK(marking_state->IsBlack(object));
4064 const bool success = visitor->Visit(object, object_and_size.second);
4065 USE(success);
4066 DCHECK(success);
4067 }
4068 }
4069 if (iteration_mode == kClearMarkbits) {
4070 marking_state->ClearLiveness(chunk);
4071 }
4072 }
4073
4074 template <class Visitor, typename MarkingState>
4075 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
4076 MarkingState* marking_state,
4077 Visitor* visitor,
4078 IterationMode iteration_mode) {
4079 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4080 "LiveObjectVisitor::VisitGreyObjectsNoFail");
4081 if (chunk->IsLargePage()) {
4082 HeapObject object = reinterpret_cast<LargePage*>(chunk)->GetObject();
4083 if (marking_state->IsGrey(object)) {
4084 const bool success = visitor->Visit(object, object.Size());
4085 USE(success);
4086 DCHECK(success);
4087 }
4088 } else {
4089 for (auto object_and_size :
4090 LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
4091 HeapObject const object = object_and_size.first;
4092 DCHECK(marking_state->IsGrey(object));
4093 const bool success = visitor->Visit(object, object_and_size.second);
4094 USE(success);
4095 DCHECK(success);
4096 }
4097 }
4098 if (iteration_mode == kClearMarkbits) {
4099 marking_state->ClearLiveness(chunk);
4100 }
4101 }
4102
4103 template <typename MarkingState>
4104 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
4105 MarkingState* marking_state) {
4106 int new_live_size = 0;
4107 for (auto object_and_size :
4108 LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
4109 new_live_size += object_and_size.second;
4110 }
4111 marking_state->SetLiveBytes(chunk, new_live_size);
4112 }
4113
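// Top-level evacuation phase: evacuates pages in parallel, updates all
// pointers, rebalances new space, re-queues promoted and aborted pages for
// sweeping, and finally verifies the heap when requested.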
4114 void MarkCompactCollector::Evacuate() {
4115 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
4116 base::MutexGuard guard(heap()->relocation_mutex());
4117
4118 {
4119 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
4120 EvacuatePrologue();
4121 }
4122
4123 {
4124 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
4125 EvacuationScope evacuation_scope(this);
4126 EvacuatePagesInParallel();
4127 }
4128
4129 UpdatePointersAfterEvacuation();
4130
4131 if (heap()->new_space()) {
4132 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
4133 if (!heap()->new_space()->Rebalance()) {
4134 heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4135 }
4136 }
4137
4138 // Give pages that are queued to be freed back to the OS. Note that filtering
4139 // slots only handles old space (for unboxed doubles), and thus map space can
4140 // still contain stale pointers. We only free the chunks after pointer updates
4141 // to still have access to page headers.
4142 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4143
4144 {
4145 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
4146
4147 for (Page* p : new_space_evacuation_pages_) {
4148 // Full GCs don't promote pages within new space.
4149 DCHECK(!p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
4150 if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4151 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4152 DCHECK_EQ(OLD_SPACE, p->owner_identity());
4153 sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
4154 }
4155 }
4156 new_space_evacuation_pages_.clear();
4157
4158 for (LargePage* p : promoted_large_pages_) {
4159 DCHECK(p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
4160 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4161 }
4162 promoted_large_pages_.clear();
4163
4164 for (Page* p : old_space_evacuation_pages_) {
4165 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
4166 sweeper()->AddPage(p->owner_identity(), p, Sweeper::REGULAR);
4167 p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
4168 }
4169 }
4170 }
4171
4172 {
4173 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
4174 EvacuateEpilogue();
4175 }
4176
4177 #ifdef VERIFY_HEAP
4178 if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
4179 FullEvacuationVerifier verifier(heap());
4180 verifier.Run();
4181 }
4182 #endif
4183 }
4184
4185 class UpdatingItem : public ParallelWorkItem {
4186 public:
4187 virtual ~UpdatingItem() = default;
4188 virtual void Process() = 0;
4189 };
4190
4191 class PointersUpdatingJob : public v8::JobTask {
4192 public:
4193 explicit PointersUpdatingJob(
4194 Isolate* isolate,
4195 std::vector<std::unique_ptr<UpdatingItem>> updating_items,
4196 GCTracer::Scope::ScopeId scope, GCTracer::Scope::ScopeId background_scope)
4197 : updating_items_(std::move(updating_items)),
4198 remaining_updating_items_(updating_items_.size()),
4199 generator_(updating_items_.size()),
4200 tracer_(isolate->heap()->tracer()),
4201 scope_(scope),
4202 background_scope_(background_scope) {}
4203
4204 void Run(JobDelegate* delegate) override {
4205 if (delegate->IsJoiningThread()) {
4206 TRACE_GC(tracer_, scope_);
4207 UpdatePointers(delegate);
4208 } else {
4209 TRACE_GC_EPOCH(tracer_, background_scope_, ThreadKind::kBackground);
4210 UpdatePointers(delegate);
4211 }
4212 }
4213
4214 void UpdatePointers(JobDelegate* delegate) {
4215 while (remaining_updating_items_.load(std::memory_order_relaxed) > 0) {
4216 base::Optional<size_t> index = generator_.GetNext();
4217 if (!index) return;
4218 for (size_t i = *index; i < updating_items_.size(); ++i) {
4219 auto& work_item = updating_items_[i];
4220 if (!work_item->TryAcquire()) break;
4221 work_item->Process();
4222 if (remaining_updating_items_.fetch_sub(1, std::memory_order_relaxed) <=
4223 1) {
4224 return;
4225 }
4226 }
4227 }
4228 }
4229
4230 size_t GetMaxConcurrency(size_t worker_count) const override {
4231 size_t items = remaining_updating_items_.load(std::memory_order_relaxed);
4232 if (!FLAG_parallel_pointer_update) return items > 0;
4233 const size_t kMaxPointerUpdateTasks = 8;
4234 size_t max_concurrency = std::min<size_t>(kMaxPointerUpdateTasks, items);
4235 DCHECK_IMPLIES(items > 0, max_concurrency > 0);
4236 return max_concurrency;
4237 }
4238
4239 private:
4240 std::vector<std::unique_ptr<UpdatingItem>> updating_items_;
4241 std::atomic<size_t> remaining_updating_items_{0};
4242 IndexGenerator generator_;
4243
4244 GCTracer* tracer_;
4245 GCTracer::Scope::ScopeId scope_;
4246 GCTracer::Scope::ScopeId background_scope_;
4247 };
4248
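// Updates pointers inside a range of a to-space page. Pages that were
// promoted within new space still contain garbage and are therefore iterated
// via markbits; all other pages are iterated linearly.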
4249 template <typename MarkingState>
4250 class ToSpaceUpdatingItem : public UpdatingItem {
4251 public:
4252   explicit ToSpaceUpdatingItem(Heap* heap, MemoryChunk* chunk, Address start,
4253 Address end, MarkingState* marking_state)
4254 : heap_(heap),
4255 chunk_(chunk),
4256 start_(start),
4257 end_(end),
4258 marking_state_(marking_state) {}
4259 ~ToSpaceUpdatingItem() override = default;
4260
4261   void Process() override {
4262 if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
4263 // New->new promoted pages contain garbage so they require iteration using
4264 // markbits.
4265 ProcessVisitLive();
4266 } else {
4267 ProcessVisitAll();
4268 }
4269 }
4270
4271 private:
4272   void ProcessVisitAll() {
4273 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4274 "ToSpaceUpdatingItem::ProcessVisitAll");
4275 PointersUpdatingVisitor visitor(heap_);
4276 for (Address cur = start_; cur < end_;) {
4277 HeapObject object = HeapObject::FromAddress(cur);
4278 Map map = object.map(visitor.cage_base());
4279 int size = object.SizeFromMap(map);
4280 object.IterateBodyFast(map, size, &visitor);
4281 cur += size;
4282 }
4283 }
4284
4285   void ProcessVisitLive() {
4286 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4287 "ToSpaceUpdatingItem::ProcessVisitLive");
4288 // For young generation evacuations we want to visit grey objects, for
4289 // full MC, we need to visit black objects.
4290 PointersUpdatingVisitor visitor(heap_);
4291 for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
4292 chunk_, marking_state_->bitmap(chunk_))) {
4293 object_and_size.first.IterateBodyFast(visitor.cage_base(), &visitor);
4294 }
4295 }
4296
4297 Heap* heap_;
4298 MemoryChunk* chunk_;
4299 Address start_;
4300 Address end_;
4301 MarkingState* marking_state_;
4302 };
4303
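// Updates the recorded slots of a single memory chunk: OLD_TO_NEW always,
// and OLD_TO_OLD (plus OLD_TO_CODE when the external code space is enabled)
// only when updating_mode is RememberedSetUpdatingMode::ALL.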
4304 template <typename MarkingState, GarbageCollector collector>
4305 class RememberedSetUpdatingItem : public UpdatingItem {
4306 public:
4307   explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
4308 MemoryChunk* chunk,
4309 RememberedSetUpdatingMode updating_mode)
4310 : heap_(heap),
4311 marking_state_(marking_state),
4312 chunk_(chunk),
4313 updating_mode_(updating_mode) {}
4314 ~RememberedSetUpdatingItem() override = default;
4315
4316   void Process() override {
4317 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4318 "RememberedSetUpdatingItem::Process");
4319 base::MutexGuard guard(chunk_->mutex());
4320 CodePageMemoryModificationScope memory_modification_scope(chunk_);
4321 UpdateUntypedPointers();
4322 UpdateTypedPointers();
4323 }
4324
4325 private:
4326 template <typename TSlot>
4327   inline SlotCallbackResult CheckAndUpdateOldToNewSlot(TSlot slot) {
4328 static_assert(
4329 std::is_same<TSlot, FullMaybeObjectSlot>::value ||
4330 std::is_same<TSlot, MaybeObjectSlot>::value,
4331 "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
4332 using THeapObjectSlot = typename TSlot::THeapObjectSlot;
4333 HeapObject heap_object;
4334 if (!(*slot).GetHeapObject(&heap_object)) {
4335 return REMOVE_SLOT;
4336 }
4337 if (Heap::InFromPage(heap_object)) {
4338 MapWord map_word = heap_object.map_word(kRelaxedLoad);
4339 if (map_word.IsForwardingAddress()) {
4340 HeapObjectReference::Update(THeapObjectSlot(slot),
4341 map_word.ToForwardingAddress());
4342 }
4343 bool success = (*slot).GetHeapObject(&heap_object);
4344 USE(success);
4345 DCHECK(success);
4346 // If the object was in from space before and is after executing the
4347 // callback in to space, the object is still live.
4348 // Unfortunately, we do not know about the slot. It could be in a
4349 // just freed free space object.
4350 if (Heap::InToPage(heap_object)) {
4351 return KEEP_SLOT;
4352 }
4353 } else if (Heap::InToPage(heap_object)) {
4354 // Slots can point to "to" space if the page has been moved, or if the
4355 // slot has been recorded multiple times in the remembered set, or
4356 // if the slot was already updated during old->old updating.
4357 // In case the page has been moved, check markbits to determine liveness
4358 // of the slot. In the other case, the slot can just be kept.
4359 if (Page::FromHeapObject(heap_object)
4360 ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
4361 // IsBlackOrGrey is required because objects are marked as grey for
4362 // the young generation collector while they are black for the full
4363           // MC.
4364 if (marking_state_->IsBlackOrGrey(heap_object)) {
4365 return KEEP_SLOT;
4366 } else {
4367 return REMOVE_SLOT;
4368 }
4369 }
4370 return KEEP_SLOT;
4371 } else {
4372 DCHECK(!Heap::InYoungGeneration(heap_object));
4373 }
4374 return REMOVE_SLOT;
4375 }
4376
4377   void UpdateUntypedPointers() {
4378 if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
4379 InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
4380 int slots = RememberedSet<OLD_TO_NEW>::Iterate(
4381 chunk_,
4382 [this, &filter](MaybeObjectSlot slot) {
4383 if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
4384 return CheckAndUpdateOldToNewSlot(slot);
4385 },
4386 SlotSet::FREE_EMPTY_BUCKETS);
4387
4388 DCHECK_IMPLIES(collector == GarbageCollector::MARK_COMPACTOR, slots == 0);
4389
4390 if (slots == 0) {
4391 chunk_->ReleaseSlotSet<OLD_TO_NEW>();
4392 }
4393 }
4394
4395 if (chunk_->invalidated_slots<OLD_TO_NEW>() != nullptr) {
4396 // The invalidated slots are not needed after old-to-new slots were
4397 // processed.
4398 chunk_->ReleaseInvalidatedSlots<OLD_TO_NEW>();
4399 }
4400
4401 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4402 (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
4403 InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToOld(chunk_);
4404 PtrComprCageBase cage_base = heap_->isolate();
4405 RememberedSet<OLD_TO_OLD>::Iterate(
4406 chunk_,
4407 [&filter, cage_base](MaybeObjectSlot slot) {
4408 if (filter.IsValid(slot.address())) {
4409 UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4410 }
4411 // Always keep slot since all slots are dropped at once after
4412 // iteration.
4413 return KEEP_SLOT;
4414 },
4415 SlotSet::KEEP_EMPTY_BUCKETS);
4416 chunk_->ReleaseSlotSet<OLD_TO_OLD>();
4417 }
4418 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4419 chunk_->invalidated_slots<OLD_TO_OLD>() != nullptr) {
4420 // The invalidated slots are not needed after old-to-old slots were
4421       // processed.
4422 chunk_->ReleaseInvalidatedSlots<OLD_TO_OLD>();
4423 }
4424 if (V8_EXTERNAL_CODE_SPACE_BOOL) {
4425 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4426 (chunk_->slot_set<OLD_TO_CODE, AccessMode::NON_ATOMIC>() !=
4427 nullptr)) {
4428 PtrComprCageBase cage_base = heap_->isolate();
4429 #ifdef V8_EXTERNAL_CODE_SPACE
4430 PtrComprCageBase code_cage_base(heap_->isolate()->code_cage_base());
4431 #else
4432 PtrComprCageBase code_cage_base = cage_base;
4433 #endif
4434 RememberedSet<OLD_TO_CODE>::Iterate(
4435 chunk_,
4436 [=](MaybeObjectSlot slot) {
4437 HeapObject host = HeapObject::FromAddress(
4438 slot.address() - CodeDataContainer::kCodeOffset);
4439 DCHECK(host.IsCodeDataContainer(cage_base));
4440 return UpdateStrongCodeSlot<AccessMode::NON_ATOMIC>(
4441 host, cage_base, code_cage_base,
4442 CodeObjectSlot(slot.address()));
4443 },
4444 SlotSet::FREE_EMPTY_BUCKETS);
4445 chunk_->ReleaseSlotSet<OLD_TO_CODE>();
4446 }
4447 // The invalidated slots are not needed after old-to-code slots were
4448     // processed, but since there are no invalidated OLD_TO_CODE slots,
4449 // there's nothing to clear.
4450 }
4451 }
4452
4453   void UpdateTypedPointers() {
4454 if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
4455 nullptr) {
4456 CHECK_NE(chunk_->owner(), heap_->map_space());
4457 const auto check_and_update_old_to_new_slot_fn =
4458 [this](FullMaybeObjectSlot slot) {
4459 return CheckAndUpdateOldToNewSlot(slot);
4460 };
4461 RememberedSet<OLD_TO_NEW>::IterateTyped(
4462 chunk_, [=](SlotType slot_type, Address slot) {
4463 return UpdateTypedSlotHelper::UpdateTypedSlot(
4464 heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
4465 });
4466 }
4467 if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
4468 (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
4469 nullptr)) {
4470 CHECK_NE(chunk_->owner(), heap_->map_space());
4471 RememberedSet<OLD_TO_OLD>::IterateTyped(chunk_, [=](SlotType slot_type,
4472 Address slot) {
4473 // Using UpdateStrongSlot is OK here, because there are no weak
4474 // typed slots.
4475 PtrComprCageBase cage_base = heap_->isolate();
4476 UpdateTypedSlotHelper::UpdateTypedSlot(
4477 heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
4478 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4479 });
4480 // Always keep slot since all slots are dropped at once after iteration.
4481 return KEEP_SLOT;
4482 });
4483 chunk_->ReleaseTypedSlotSet<OLD_TO_OLD>();
4484 }
4485 }
4486
4487 Heap* heap_;
4488 MarkingState* marking_state_;
4489 MemoryChunk* chunk_;
4490 RememberedSetUpdatingMode updating_mode_;
4491 };
4492
4493 std::unique_ptr<UpdatingItem>
4494 MarkCompactCollector::CreateRememberedSetUpdatingItem(
4495 MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4496 return std::make_unique<RememberedSetUpdatingItem<
4497 NonAtomicMarkingState, GarbageCollector::MARK_COMPACTOR>>(
4498 heap(), non_atomic_marking_state(), chunk, updating_mode);
4499 }
4500
4501 template <typename IterateableSpace>
4502 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
4503 std::vector<std::unique_ptr<UpdatingItem>>* items, IterateableSpace* space,
4504 RememberedSetUpdatingMode mode) {
4505 int pages = 0;
4506 for (MemoryChunk* chunk : *space) {
4507 const bool contains_old_to_old_slots =
4508 chunk->slot_set<OLD_TO_OLD>() != nullptr ||
4509 chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
4510 const bool contains_old_to_code_slots =
4511 V8_EXTERNAL_CODE_SPACE_BOOL &&
4512 chunk->slot_set<OLD_TO_CODE>() != nullptr;
4513 const bool contains_old_to_new_slots =
4514 chunk->slot_set<OLD_TO_NEW>() != nullptr ||
4515 chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
4516 const bool contains_old_to_old_invalidated_slots =
4517 chunk->invalidated_slots<OLD_TO_OLD>() != nullptr;
4518 const bool contains_old_to_new_invalidated_slots =
4519 chunk->invalidated_slots<OLD_TO_NEW>() != nullptr;
4520 if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
4521 !contains_old_to_old_invalidated_slots &&
4522 !contains_old_to_new_invalidated_slots && !contains_old_to_code_slots)
4523 continue;
4524 if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
4525 contains_old_to_old_invalidated_slots ||
4526 contains_old_to_new_invalidated_slots) {
4527 items->emplace_back(CreateRememberedSetUpdatingItem(chunk, mode));
4528 pages++;
4529 }
4530 }
4531 return pages;
4532 }
4533
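// Updates the ephemeron remembered set: forwards moved keys and drops entries
// whose table has moved or whose keys are no longer in the young generation.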
4534 class EphemeronTableUpdatingItem : public UpdatingItem {
4535 public:
4536 enum EvacuationState { kRegular, kAborted };
4537
4538   explicit EphemeronTableUpdatingItem(Heap* heap) : heap_(heap) {}
4539 ~EphemeronTableUpdatingItem() override = default;
4540
4541   void Process() override {
4542 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4543 "EphemeronTableUpdatingItem::Process");
4544 PtrComprCageBase cage_base(heap_->isolate());
4545
4546 for (auto it = heap_->ephemeron_remembered_set_.begin();
4547 it != heap_->ephemeron_remembered_set_.end();) {
4548 EphemeronHashTable table = it->first;
4549 auto& indices = it->second;
4550 if (table.map_word(cage_base, kRelaxedLoad).IsForwardingAddress()) {
4551 // The table has moved, and RecordMigratedSlotVisitor::VisitEphemeron
4552 // inserts entries for the moved table into ephemeron_remembered_set_.
4553 it = heap_->ephemeron_remembered_set_.erase(it);
4554 continue;
4555 }
4556 DCHECK(table.map(cage_base).IsMap(cage_base));
4557 DCHECK(table.IsEphemeronHashTable(cage_base));
4558 for (auto iti = indices.begin(); iti != indices.end();) {
4559 // EphemeronHashTable keys must be heap objects.
4560 HeapObjectSlot key_slot(table.RawFieldOfElementAt(
4561 EphemeronHashTable::EntryToIndex(InternalIndex(*iti))));
4562 HeapObject key = key_slot.ToHeapObject();
4563 MapWord map_word = key.map_word(cage_base, kRelaxedLoad);
4564 if (map_word.IsForwardingAddress()) {
4565 key = map_word.ToForwardingAddress();
4566 key_slot.StoreHeapObject(key);
4567 }
4568 if (!heap_->InYoungGeneration(key)) {
4569 iti = indices.erase(iti);
4570 } else {
4571 ++iti;
4572 }
4573 }
4574 if (indices.size() == 0) {
4575 it = heap_->ephemeron_remembered_set_.erase(it);
4576 } else {
4577 ++it;
4578 }
4579 }
4580 }
4581
4582 private:
4583 Heap* const heap_;
4584 };
4585
4586 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
4587 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
4588
4589 {
4590 TRACE_GC(heap()->tracer(),
4591 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
4592 // The external string table is updated at the end.
4593 PointersUpdatingVisitor updating_visitor(heap());
4594 heap_->IterateRootsIncludingClients(
4595 &updating_visitor,
4596 base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable});
4597 }
4598
4599 {
4600 TRACE_GC(heap()->tracer(),
4601 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_CLIENT_HEAPS);
4602 UpdatePointersInClientHeaps();
4603 }
4604
4605 {
4606 TRACE_GC(heap()->tracer(),
4607 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
4608 std::vector<std::unique_ptr<UpdatingItem>> updating_items;
4609
4610 CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
4611 RememberedSetUpdatingMode::ALL);
4612 CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
4613 RememberedSetUpdatingMode::ALL);
4614 CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
4615 RememberedSetUpdatingMode::ALL);
4616 CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
4617 RememberedSetUpdatingMode::ALL);
4618 if (heap()->map_space()) {
4619 CollectRememberedSetUpdatingItems(&updating_items, heap()->map_space(),
4620 RememberedSetUpdatingMode::ALL);
4621 }
4622
4623 // Iterating to space may require a valid body descriptor for e.g.
4624 // WasmStruct which races with updating a slot in Map. Since to space is
4625 // empty after a full GC, such races can't happen.
4626 DCHECK_IMPLIES(heap()->new_space(), heap()->new_space()->Size() == 0);
4627
4628 updating_items.push_back(
4629 std::make_unique<EphemeronTableUpdatingItem>(heap()));
4630
4631 V8::GetCurrentPlatform()
4632 ->PostJob(v8::TaskPriority::kUserBlocking,
4633 std::make_unique<PointersUpdatingJob>(
4634 isolate(), std::move(updating_items),
4635 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
4636 GCTracer::Scope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
4637 ->Join();
4638 }
4639
4640 {
4641 TRACE_GC(heap()->tracer(),
4642 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
4643 // Update pointers from external string table.
4644 heap_->UpdateReferencesInExternalStringTable(
4645 &UpdateReferenceInExternalStringTableEntry);
4646
4647 EvacuationWeakObjectRetainer evacuation_object_retainer;
4648 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
4649 }
4650 }
4651
4652 void MarkCompactCollector::UpdatePointersInClientHeaps() {
4653 if (!isolate()->is_shared()) return;
4654
4655 isolate()->global_safepoint()->IterateClientIsolates(
4656 [this](Isolate* client) { UpdatePointersInClientHeap(client); });
4657 }
4658
4659 void MarkCompactCollector::UpdatePointersInClientHeap(Isolate* client) {
4660 PtrComprCageBase cage_base(client);
4661 MemoryChunkIterator chunk_iterator(client->heap());
4662
4663 while (chunk_iterator.HasNext()) {
4664 MemoryChunk* chunk = chunk_iterator.Next();
4665 CodePageMemoryModificationScope unprotect_code_page(chunk);
4666
4667 RememberedSet<OLD_TO_SHARED>::Iterate(
4668 chunk,
4669 [cage_base](MaybeObjectSlot slot) {
4670 return UpdateSlot<AccessMode::NON_ATOMIC>(cage_base, slot);
4671 },
4672 SlotSet::KEEP_EMPTY_BUCKETS);
4673
4674 chunk->ReleaseSlotSet<OLD_TO_SHARED>();
4675
4676 RememberedSet<OLD_TO_SHARED>::IterateTyped(
4677 chunk, [this](SlotType slot_type, Address slot) {
4678 // Using UpdateStrongSlot is OK here, because there are no weak
4679 // typed slots.
4680 PtrComprCageBase cage_base = heap_->isolate();
4681 return UpdateTypedSlotHelper::UpdateTypedSlot(
4682 heap_, slot_type, slot, [cage_base](FullMaybeObjectSlot slot) {
4683 return UpdateStrongSlot<AccessMode::NON_ATOMIC>(cage_base,
4684 slot);
4685 });
4686 });
4687
4688 chunk->ReleaseTypedSlotSet<OLD_TO_SHARED>();
4689 }
4690
4691 #ifdef VERIFY_HEAP
4692 if (FLAG_verify_heap) {
4693 ClientHeapVerifier verifier_visitor(client->heap());
4694
4695 HeapObjectIterator iterator(client->heap(),
4696 HeapObjectIterator::kNoFiltering);
4697 for (HeapObject obj = iterator.Next(); !obj.is_null();
4698 obj = iterator.Next()) {
4699 obj.IterateFast(cage_base, &verifier_visitor);
4700 }
4701 }
4702 #endif // VERIFY_HEAP
4703 }
4704
4705 void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToOOM(
4706 Address failed_start, Page* page) {
4707 base::MutexGuard guard(&mutex_);
4708 aborted_evacuation_candidates_due_to_oom_.push_back(
4709 std::make_pair(failed_start, page));
4710 }
4711
4712 void MarkCompactCollector::ReportAbortedEvacuationCandidateDueToFlags(
4713 Address failed_start, Page* page) {
4714 base::MutexGuard guard(&mutex_);
4715 aborted_evacuation_candidates_due_to_flags_.push_back(
4716 std::make_pair(failed_start, page));
4717 }
4718
4719 namespace {
4720
4721 void ReRecordPage(
4722 Heap* heap,
4723 v8::internal::MarkCompactCollector::NonAtomicMarkingState* marking_state,
4724 Address failed_start, Page* page) {
4725 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
4726 // Aborted compaction page. We have to record slots here, since we
4727   // might not have recorded them in the first place.
4728
4729 // Remove outdated slots.
4730 RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
4731 SlotSet::FREE_EMPTY_BUCKETS);
4732 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
4733 failed_start);
4734
4735 // Remove invalidated slots.
4736 if (failed_start > page->area_start()) {
4737 InvalidatedSlotsCleanup old_to_new_cleanup =
4738 InvalidatedSlotsCleanup::OldToNew(page);
4739 old_to_new_cleanup.Free(page->area_start(), failed_start);
4740 }
4741
4742 // Recompute live bytes.
4743 LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
4744 // Re-record slots.
4745 EvacuateRecordOnlyVisitor record_visitor(heap);
4746 LiveObjectVisitor::VisitBlackObjectsNoFail(
4747 page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
4748 // Array buffers will be processed during pointer updating.
4749 }
4750
4751 } // namespace
4752
4753 size_t MarkCompactCollector::PostProcessEvacuationCandidates() {
4754 CHECK_IMPLIES(FLAG_crash_on_aborted_evacuation,
4755 aborted_evacuation_candidates_due_to_oom_.empty());
4756 for (auto start_and_page : aborted_evacuation_candidates_due_to_oom_) {
4757 ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
4758 start_and_page.second);
4759 }
4760 for (auto start_and_page : aborted_evacuation_candidates_due_to_flags_) {
4761 ReRecordPage(heap(), non_atomic_marking_state(), start_and_page.first,
4762 start_and_page.second);
4763 }
4764 const size_t aborted_pages =
4765 aborted_evacuation_candidates_due_to_oom_.size() +
4766 aborted_evacuation_candidates_due_to_flags_.size();
4767 size_t aborted_pages_verified = 0;
4768 for (Page* p : old_space_evacuation_pages_) {
4769 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
4770 // After clearing the evacuation candidate flag the page is again in a
4771 // regular state.
4772 p->ClearEvacuationCandidate();
4773 aborted_pages_verified++;
4774 } else {
4775 DCHECK(p->IsEvacuationCandidate());
4776 DCHECK(p->SweepingDone());
4777 p->owner()->memory_chunk_list().Remove(p);
4778 }
4779 }
4780 DCHECK_EQ(aborted_pages_verified, aborted_pages);
4781 USE(aborted_pages_verified);
4782 return aborted_pages;
4783 }
4784
4785 void MarkCompactCollector::ReleaseEvacuationCandidates() {
4786 for (Page* p : old_space_evacuation_pages_) {
4787 if (!p->IsEvacuationCandidate()) continue;
4788 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
4789 non_atomic_marking_state()->SetLiveBytes(p, 0);
4790 CHECK(p->SweepingDone());
4791 space->ReleasePage(p);
4792 }
4793 old_space_evacuation_pages_.clear();
4794 compacting_ = false;
4795 }
4796
4797 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
4798 space->ClearAllocatorState();
4799
4800 int will_be_swept = 0;
4801 bool unused_page_present = false;
4802
4803 // Loop needs to support deletion if live bytes == 0 for a page.
4804 for (auto it = space->begin(); it != space->end();) {
4805 Page* p = *(it++);
4806 DCHECK(p->SweepingDone());
4807
4808 if (p->IsEvacuationCandidate()) {
4809 // Will be processed in Evacuate.
4810 DCHECK(!evacuation_candidates_.empty());
4811 continue;
4812 }
4813
4814     // One unused page is kept; all further unused pages are released rather than swept.
4815 if (non_atomic_marking_state()->live_bytes(p) == 0) {
4816 if (unused_page_present) {
4817 if (FLAG_gc_verbose) {
4818 PrintIsolate(isolate(), "sweeping: released page: %p",
4819 static_cast<void*>(p));
4820 }
4821 space->memory_chunk_list().Remove(p);
4822 space->ReleasePage(p);
4823 continue;
4824 }
4825 unused_page_present = true;
4826 }
4827
4828 sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
4829 will_be_swept++;
4830 }
4831
4832 if (FLAG_gc_verbose) {
4833 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
4834 space->name(), will_be_swept);
4835 }
4836 }
4837
4838 void MarkCompactCollector::StartSweepSpaces() {
4839 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
4840 #ifdef DEBUG
4841 state_ = SWEEP_SPACES;
4842 #endif
4843
4844 {
4845 {
4846 GCTracer::Scope sweep_scope(
4847 heap()->tracer(), GCTracer::Scope::MC_SWEEP_OLD, ThreadKind::kMain);
4848 StartSweepSpace(heap()->old_space());
4849 }
4850 {
4851 GCTracer::Scope sweep_scope(
4852 heap()->tracer(), GCTracer::Scope::MC_SWEEP_CODE, ThreadKind::kMain);
4853 StartSweepSpace(heap()->code_space());
4854 }
4855 if (heap()->map_space()) {
4856 GCTracer::Scope sweep_scope(
4857 heap()->tracer(), GCTracer::Scope::MC_SWEEP_MAP, ThreadKind::kMain);
4858 StartSweepSpace(heap()->map_space());
4859 }
4860 sweeper()->StartSweeping();
4861 }
4862 }
4863
4864 namespace {
4865
4866 #ifdef VERIFY_HEAP
4867
4868 class YoungGenerationMarkingVerifier : public MarkingVerifier {
4869 public:
4870   explicit YoungGenerationMarkingVerifier(Heap* heap)
4871 : MarkingVerifier(heap),
4872 marking_state_(
4873 heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
4874
4875   ConcurrentBitmap<AccessMode::NON_ATOMIC>* bitmap(
4876 const MemoryChunk* chunk) override {
4877 return marking_state_->bitmap(chunk);
4878 }
4879
4880   bool IsMarked(HeapObject object) override {
4881 return marking_state_->IsGrey(object);
4882 }
4883
4884   bool IsBlackOrGrey(HeapObject object) override {
4885 return marking_state_->IsBlackOrGrey(object);
4886 }
4887
4888   void Run() override {
4889 VerifyRoots();
4890 VerifyMarking(heap_->new_space());
4891 }
4892
4893 protected:
4894   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
4895
4896   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4897 VerifyPointersImpl(start, end);
4898 }
4899
4900   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4901 VerifyPointersImpl(start, end);
4902 }
4903   void VerifyCodePointer(CodeObjectSlot slot) override {
4904 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4905 // Code slots never appear in new space because CodeDataContainers, the
4906     // only objects that can contain code pointers, are always allocated in
4907 // the old space.
4908 UNREACHABLE();
4909 }
4910
4911   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4912 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4913 VerifyHeapObjectImpl(target);
4914 }
4915   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4916 VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
4917 }
4918   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4919 VerifyPointersImpl(start, end);
4920 }
4921
4922 private:
4923   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4924 CHECK_IMPLIES(Heap::InYoungGeneration(heap_object), IsMarked(heap_object));
4925 }
4926
4927 template <typename TSlot>
4928   V8_INLINE void VerifyPointersImpl(TSlot start, TSlot end) {
4929 PtrComprCageBase cage_base =
4930 GetPtrComprCageBaseFromOnHeapAddress(start.address());
4931 for (TSlot slot = start; slot < end; ++slot) {
4932 typename TSlot::TObject object = slot.load(cage_base);
4933 HeapObject heap_object;
4934 // Minor MC treats weak references as strong.
4935 if (object.GetHeapObject(&heap_object)) {
4936 VerifyHeapObjectImpl(heap_object);
4937 }
4938 }
4939 }
4940
4941 MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4942 };
4943
4944 class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
4945 public:
4946   explicit YoungGenerationEvacuationVerifier(Heap* heap)
4947 : EvacuationVerifier(heap) {}
4948
4949   void Run() override {
4950 DCHECK(!heap_->mark_compact_collector()->sweeping_in_progress());
4951 VerifyRoots();
4952 VerifyEvacuation(heap_->new_space());
4953 VerifyEvacuation(heap_->old_space());
4954 VerifyEvacuation(heap_->code_space());
4955 if (heap_->map_space()) VerifyEvacuation(heap_->map_space());
4956 }
4957
4958 protected:
4959   V8_INLINE void VerifyHeapObjectImpl(HeapObject heap_object) {
4960 CHECK_IMPLIES(Heap::InYoungGeneration(heap_object),
4961 Heap::InToPage(heap_object));
4962 }
4963
4964 template <typename TSlot>
4965   void VerifyPointersImpl(TSlot start, TSlot end) {
4966 for (TSlot current = start; current < end; ++current) {
4967 typename TSlot::TObject object = current.load(cage_base());
4968 HeapObject heap_object;
4969 if (object.GetHeapObject(&heap_object)) {
4970 VerifyHeapObjectImpl(heap_object);
4971 }
4972 }
4973 }
4974   void VerifyMap(Map map) override { VerifyHeapObjectImpl(map); }
4975   void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
4976 VerifyPointersImpl(start, end);
4977 }
4978   void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
4979 VerifyPointersImpl(start, end);
4980 }
4981   void VerifyCodePointer(CodeObjectSlot slot) override {
4982 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
4983 Object maybe_code = slot.load(code_cage_base());
4984 HeapObject code;
4985 // The slot might contain smi during CodeDataContainer creation, so skip it.
4986 if (maybe_code.GetHeapObject(&code)) {
4987 VerifyHeapObjectImpl(code);
4988 }
4989 }
4990   void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
4991 Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
4992 VerifyHeapObjectImpl(target);
4993 }
4994   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
4995 VerifyHeapObjectImpl(rinfo->target_object(cage_base()));
4996 }
4997   void VerifyRootPointers(FullObjectSlot start, FullObjectSlot end) override {
4998 VerifyPointersImpl(start, end);
4999 }
5000 };
5001
5002 #endif // VERIFY_HEAP
5003
5004 bool IsUnmarkedObjectForYoungGeneration(Heap* heap, FullObjectSlot p) {
5005 DCHECK_IMPLIES(Heap::InYoungGeneration(*p), Heap::InToPage(*p));
5006 return Heap::InYoungGeneration(*p) && !heap->minor_mark_compact_collector()
5007 ->non_atomic_marking_state()
5008 ->IsGrey(HeapObject::cast(*p));
5009 }
5010
5011 } // namespace
5012
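// Visitor used by the minor collector to mark young generation objects grey
// and push them onto the marking worklist. Weak references are treated as
// strong.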
5013 class YoungGenerationMarkingVisitor final
5014 : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
5015 public:
5016   YoungGenerationMarkingVisitor(
5017 Isolate* isolate, MinorMarkCompactCollector::MarkingState* marking_state,
5018 MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local)
5019 : NewSpaceVisitor(isolate),
5020 worklist_local_(worklist_local),
5021 marking_state_(marking_state) {}
5022
5023   V8_INLINE void VisitPointers(HeapObject host, ObjectSlot start,
5024 ObjectSlot end) final {
5025 VisitPointersImpl(host, start, end);
5026 }
5027
5028   V8_INLINE void VisitPointers(HeapObject host, MaybeObjectSlot start,
5029 MaybeObjectSlot end) final {
5030 VisitPointersImpl(host, start, end);
5031 }
5032
5033   V8_INLINE void VisitCodePointer(HeapObject host,
5034 CodeObjectSlot slot) override {
5035 CHECK(V8_EXTERNAL_CODE_SPACE_BOOL);
5036 // Code slots never appear in new space because CodeDataContainers, the
5037     // only objects that can contain code pointers, are always allocated in
5038 // the old space.
5039 UNREACHABLE();
5040 }
5041
5042   V8_INLINE void VisitPointer(HeapObject host, ObjectSlot slot) final {
5043 VisitPointerImpl(host, slot);
5044 }
5045
5046   V8_INLINE void VisitPointer(HeapObject host, MaybeObjectSlot slot) final {
5047 VisitPointerImpl(host, slot);
5048 }
5049
5050   V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final {
5051 // Code objects are not expected in new space.
5052 UNREACHABLE();
5053 }
5054
5055   V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
5056 // Code objects are not expected in new space.
5057 UNREACHABLE();
5058 }
5059
5060   V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer object) {
5061 object.YoungMarkExtension();
5062 int size = JSArrayBuffer::BodyDescriptor::SizeOf(map, object);
5063 JSArrayBuffer::BodyDescriptor::IterateBody(map, object, size, this);
5064 return size;
5065 }
5066
5067 private:
5068 template <typename TSlot>
5069   V8_INLINE void VisitPointersImpl(HeapObject host, TSlot start, TSlot end) {
5070 for (TSlot slot = start; slot < end; ++slot) {
5071 VisitPointer(host, slot);
5072 }
5073 }
5074
5075 template <typename TSlot>
5076   V8_INLINE void VisitPointerImpl(HeapObject host, TSlot slot) {
5077 typename TSlot::TObject target = *slot;
5078 if (Heap::InYoungGeneration(target)) {
5079 // Treat weak references as strong.
5080 // TODO(marja): Proper weakness handling for minor-mcs.
5081 HeapObject target_object = target.GetHeapObject();
5082 MarkObjectViaMarkingWorklist(target_object);
5083 }
5084 }
5085
5086   inline void MarkObjectViaMarkingWorklist(HeapObject object) {
5087 if (marking_state_->WhiteToGrey(object)) {
5088 // Marking deque overflow is unsupported for the young generation.
5089 worklist_local_->Push(object);
5090 }
5091 }
5092
5093 MinorMarkCompactCollector::MarkingWorklist::Local* worklist_local_;
5094 MinorMarkCompactCollector::MarkingState* marking_state_;
5095 };
5096
5097 void MinorMarkCompactCollector::SetUp() {}
5098
5099 void MinorMarkCompactCollector::TearDown() {}
5100
5101 // static
5102 constexpr size_t MinorMarkCompactCollector::kMaxParallelTasks;
5103
5104 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
5105 : MarkCompactCollectorBase(heap),
5106 worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
5107 main_thread_worklist_local_(worklist_),
5108 marking_state_(heap->isolate()),
5109 non_atomic_marking_state_(heap->isolate()),
5110 main_marking_visitor_(new YoungGenerationMarkingVisitor(
5111 heap->isolate(), marking_state(), &main_thread_worklist_local_)),
5112 page_parallel_job_semaphore_(0) {}
5113
5114 MinorMarkCompactCollector::~MinorMarkCompactCollector() {
5115 delete worklist_;
5116 delete main_marking_visitor_;
5117 }
5118
5119 void MinorMarkCompactCollector::CleanupPromotedPages() {
5120 for (Page* p : promoted_pages_) {
5121 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
5122 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
5123 non_atomic_marking_state()->ClearLiveness(p);
5124 }
5125 promoted_pages_.clear();
5126
5127 for (LargePage* p : promoted_large_pages_) {
5128 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
5129 }
5130 promoted_large_pages_.clear();
5131 }
5132
5133 void MinorMarkCompactCollector::SweepArrayBufferExtensions() {
5134 heap_->array_buffer_sweeper()->RequestSweep(
5135 ArrayBufferSweeper::SweepingType::kYoung);
5136 }
5137
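// Transfers the marking color from the source to the destination object when
// objects are migrated while incremental (full) marking is in progress.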
5138 class YoungGenerationMigrationObserver final : public MigrationObserver {
5139 public:
5140   YoungGenerationMigrationObserver(Heap* heap,
5141 MarkCompactCollector* mark_compact_collector)
5142 : MigrationObserver(heap),
5143 mark_compact_collector_(mark_compact_collector) {}
5144
5145   inline void Move(AllocationSpace dest, HeapObject src, HeapObject dst,
5146 int size) final {
5147 // Migrate color to old generation marking in case the object survived young
5148 // generation garbage collection.
5149 if (heap_->incremental_marking()->IsMarking()) {
5150 DCHECK(
5151 heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
5152 heap_->incremental_marking()->TransferColor(src, dst);
5153 }
5154 }
5155
5156 protected:
5157 base::Mutex mutex_;
5158 MarkCompactCollector* mark_compact_collector_;
5159 };
5160
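// Records remembered set slots for objects migrated by the young generation
// collector. Code targets and embedded pointers are unreachable here since
// code objects never live in new space.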
5161 class YoungGenerationRecordMigratedSlotVisitor final
5162 : public RecordMigratedSlotVisitor {
5163 public:
5164   explicit YoungGenerationRecordMigratedSlotVisitor(
5165 MarkCompactCollector* collector)
5166 : RecordMigratedSlotVisitor(collector, nullptr) {}
5167
5168   void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
5169   void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
5170 UNREACHABLE();
5171 }
5172
5173   void MarkArrayBufferExtensionPromoted(HeapObject object) final {
5174 if (!object.IsJSArrayBuffer()) return;
5175 JSArrayBuffer::cast(object).YoungMarkExtensionPromoted();
5176 }
5177
5178 private:
5179 // Only record slots for host objects that are considered as live by the full
5180 // collector.
5181   inline bool IsLive(HeapObject object) {
5182 return collector_->non_atomic_marking_state()->IsBlack(object);
5183 }
5184
5185   inline void RecordMigratedSlot(HeapObject host, MaybeObject value,
5186 Address slot) final {
5187 if (value->IsStrongOrWeak()) {
5188 BasicMemoryChunk* p = BasicMemoryChunk::FromAddress(value.ptr());
5189 if (p->InYoungGeneration()) {
5190 DCHECK_IMPLIES(
5191 p->IsToPage(),
5192 p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) || p->IsLargePage());
5193 MemoryChunk* chunk = MemoryChunk::FromHeapObject(host);
5194 DCHECK(chunk->SweepingDone());
5195 RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(chunk, slot);
5196 } else if (p->IsEvacuationCandidate() && IsLive(host)) {
5197 if (V8_EXTERNAL_CODE_SPACE_BOOL &&
5198 p->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
5199 RememberedSet<OLD_TO_CODE>::Insert<AccessMode::NON_ATOMIC>(
5200 MemoryChunk::FromHeapObject(host), slot);
5201 } else {
5202 RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
5203 MemoryChunk::FromHeapObject(host), slot);
5204 }
5205 }
5206 }
5207 }
5208 };
5209
5210 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
5211 TRACE_GC(heap()->tracer(),
5212 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
5213
5214 PointersUpdatingVisitor updating_visitor(heap());
5215 std::vector<std::unique_ptr<UpdatingItem>> updating_items;
5216
5217 // Create batches of global handles.
5218 CollectToSpaceUpdatingItems(&updating_items);
5219 CollectRememberedSetUpdatingItems(&updating_items, heap()->old_space(),
5220 RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5221 CollectRememberedSetUpdatingItems(&updating_items, heap()->code_space(),
5222 RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5223 if (heap()->map_space()) {
5224 CollectRememberedSetUpdatingItems(
5225 &updating_items, heap()->map_space(),
5226 RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5227 }
5228 CollectRememberedSetUpdatingItems(&updating_items, heap()->lo_space(),
5229 RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5230 CollectRememberedSetUpdatingItems(&updating_items, heap()->code_lo_space(),
5231 RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
5232
5233 {
5234 TRACE_GC(heap()->tracer(),
5235 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
5236 heap()->IterateRoots(&updating_visitor,
5237 base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
5238 SkipRoot::kOldGeneration});
5239 }
5240 {
5241 TRACE_GC(heap()->tracer(),
5242 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
5243 V8::GetCurrentPlatform()
5244 ->PostJob(
5245 v8::TaskPriority::kUserBlocking,
5246 std::make_unique<PointersUpdatingJob>(
5247 isolate(), std::move(updating_items),
5248 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_PARALLEL,
5249 GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS))
5250 ->Join();
5251 }
5252
5253 {
5254 TRACE_GC(heap()->tracer(),
5255 GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
5256
5257 EvacuationWeakObjectRetainer evacuation_object_retainer;
5258 heap()->ProcessWeakListRoots(&evacuation_object_retainer);
5259
5260 // Update pointers from external string table.
5261 heap()->UpdateYoungReferencesInExternalStringTable(
5262 &UpdateReferenceInExternalStringTableEntry);
5263 }
5264 }
5265
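// Root visitor of the minor collector: marks every young generation object
// that is directly reachable from a root.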
5266 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
5267 public:
5268   explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
5269 : collector_(collector) {}
5270
5271   void VisitRootPointer(Root root, const char* description,
5272 FullObjectSlot p) final {
5273 MarkObjectByPointer(p);
5274 }
5275
5276   void VisitRootPointers(Root root, const char* description,
5277 FullObjectSlot start, FullObjectSlot end) final {
5278 for (FullObjectSlot p = start; p < end; ++p) {
5279 DCHECK(!MapWord::IsPacked((*p).ptr()));
5280 MarkObjectByPointer(p);
5281 }
5282 }
5283
5284 private:
5285   V8_INLINE void MarkObjectByPointer(FullObjectSlot p) {
5286 if (!(*p).IsHeapObject()) return;
5287 collector_->MarkRootObject(HeapObject::cast(*p));
5288 }
5289 MinorMarkCompactCollector* const collector_;
5290 };
5291
5292 void MinorMarkCompactCollector::CollectGarbage() {
5293 // Minor MC does not support processing the ephemeron remembered set.
5294 DCHECK(heap()->ephemeron_remembered_set_.empty());
5295
5296 {
5297 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
5298 heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
5299 }
5300
5301 heap()->array_buffer_sweeper()->EnsureFinished();
5302
5303 MarkLiveObjects();
5304 ClearNonLiveReferences();
5305 #ifdef VERIFY_HEAP
5306 if (FLAG_verify_heap) {
5307 YoungGenerationMarkingVerifier verifier(heap());
5308 verifier.Run();
5309 }
5310 #endif // VERIFY_HEAP
5311
5312 Evacuate();
5313 #ifdef VERIFY_HEAP
5314 if (FLAG_verify_heap) {
5315 YoungGenerationEvacuationVerifier verifier(heap());
5316 verifier.Run();
5317 }
5318 #endif // VERIFY_HEAP
5319
5320 {
5321 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
5322 heap()->incremental_marking()->UpdateMarkingWorklistAfterYoungGenGC();
5323 }
5324
5325 {
5326 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
5327 for (Page* p :
5328 PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
5329 DCHECK_EQ(promoted_pages_.end(),
5330 std::find(promoted_pages_.begin(), promoted_pages_.end(), p));
5331 non_atomic_marking_state()->ClearLiveness(p);
5332 if (FLAG_concurrent_marking) {
5333 // Ensure that concurrent marker does not track pages that are
5334 // going to be unmapped.
5335 heap()->concurrent_marking()->ClearMemoryChunkData(p);
5336 }
5337 }
5338     // Since we promote all surviving large objects immediately, all remaining
5339 // large objects must be dead.
5340 // TODO(v8:11685): Don't free all as soon as we have an intermediate
5341 // generation.
5342 heap()->new_lo_space()->FreeDeadObjects([](HeapObject) { return true; });
5343 }
5344
5345 CleanupPromotedPages();
5346
5347 SweepArrayBufferExtensions();
5348 }
5349
5350 void MinorMarkCompactCollector::MakeIterable(
5351 Page* p, FreeSpaceTreatmentMode free_space_mode) {
5352 CHECK(!p->IsLargePage());
5353   // We have to clear the full collector's markbits for the areas that we
5354 // remove here.
5355 MarkCompactCollector* full_collector = heap()->mark_compact_collector();
5356 Address free_start = p->area_start();
5357
5358 for (auto object_and_size :
5359 LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
5360 HeapObject const object = object_and_size.first;
5361 DCHECK(non_atomic_marking_state()->IsGrey(object));
5362 Address free_end = object.address();
5363 if (free_end != free_start) {
5364 CHECK_GT(free_end, free_start);
5365 size_t size = static_cast<size_t>(free_end - free_start);
5366 full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
5367 p->AddressToMarkbitIndex(free_start),
5368 p->AddressToMarkbitIndex(free_end));
5369 if (free_space_mode == ZAP_FREE_SPACE) {
5370 ZapCode(free_start, size);
5371 }
5372 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
5373 ClearRecordedSlots::kNo);
5374 }
5375 PtrComprCageBase cage_base(p->heap()->isolate());
5376 Map map = object.map(cage_base, kAcquireLoad);
5377 int size = object.SizeFromMap(map);
5378 free_start = free_end + size;
5379 }
5380
5381 if (free_start != p->area_end()) {
5382 CHECK_GT(p->area_end(), free_start);
5383 size_t size = static_cast<size_t>(p->area_end() - free_start);
5384 full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
5385 p->AddressToMarkbitIndex(free_start),
5386 p->AddressToMarkbitIndex(p->area_end()));
5387 if (free_space_mode == ZAP_FREE_SPACE) {
5388 ZapCode(free_start, size);
5389 }
5390 p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
5391 ClearRecordedSlots::kNo);
5392 }
5393 }
5394
5395 namespace {
5396
5397 // Helper class for pruning the external string table.
5398 class YoungGenerationExternalStringTableCleaner : public RootVisitor {
5399 public:
5400   YoungGenerationExternalStringTableCleaner(
5401 MinorMarkCompactCollector* collector)
5402 : heap_(collector->heap()),
5403 marking_state_(collector->non_atomic_marking_state()) {}
5404
5405   void VisitRootPointers(Root root, const char* description,
5406 FullObjectSlot start, FullObjectSlot end) override {
5407 DCHECK_EQ(static_cast<int>(root),
5408 static_cast<int>(Root::kExternalStringsTable));
5409 // Visit all HeapObject pointers in [start, end).
5410 for (FullObjectSlot p = start; p < end; ++p) {
5411 Object o = *p;
5412 if (o.IsHeapObject()) {
5413 HeapObject heap_object = HeapObject::cast(o);
5414 if (marking_state_->IsWhite(heap_object)) {
5415 if (o.IsExternalString()) {
5416 heap_->FinalizeExternalString(String::cast(*p));
5417 } else {
5418 // The original external string may have been internalized.
5419 DCHECK(o.IsThinString());
5420 }
5421 // Set the entry to the_hole_value (as deleted).
5422 p.store(ReadOnlyRoots(heap_).the_hole_value());
5423 }
5424 }
5425 }
5426 }
5427
5428 private:
5429 Heap* heap_;
5430 MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
5431 };
5432
5433 // Marked young generation objects and all old generation objects will be
5434 // retained.
5435 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
5436 public:
5437   explicit MinorMarkCompactWeakObjectRetainer(
5438 MinorMarkCompactCollector* collector)
5439 : marking_state_(collector->non_atomic_marking_state()) {}
5440
5441   Object RetainAs(Object object) override {
5442 HeapObject heap_object = HeapObject::cast(object);
5443 if (!Heap::InYoungGeneration(heap_object)) return object;
5444
5445 // Young generation marking only marks to grey instead of black.
5446 DCHECK(!marking_state_->IsBlack(heap_object));
5447 if (marking_state_->IsGrey(heap_object)) {
5448 return object;
5449 }
5450 return Object();
5451 }
5452
5453 private:
5454 MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
5455 };
5456
5457 } // namespace
5458
5459 void MinorMarkCompactCollector::ClearNonLiveReferences() {
5460 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
5461
5462 {
5463 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
5464 // Internalized strings are always stored in old space, so there is no need
5465 // to clean them here.
5466 YoungGenerationExternalStringTableCleaner external_visitor(this);
5467 heap()->external_string_table_.IterateYoung(&external_visitor);
5468 heap()->external_string_table_.CleanUpYoung();
5469 }
5470
5471 {
5472 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
5473 // Process the weak references.
5474 MinorMarkCompactWeakObjectRetainer retainer(this);
5475 heap()->ProcessYoungWeakReferences(&retainer);
5476 }
5477 }
5478
5479 void MinorMarkCompactCollector::EvacuatePrologue() {
5480 NewSpace* new_space = heap()->new_space();
5481 // Append the list of new space pages to be processed.
5482 for (Page* p :
5483 PageRange(new_space->first_allocatable_address(), new_space->top())) {
5484 new_space_evacuation_pages_.push_back(p);
5485 }
5486
5487 new_space->Flip();
5488 new_space->ResetLinearAllocationArea();
5489
5490 heap()->new_lo_space()->Flip();
5491 heap()->new_lo_space()->ResetPendingObject();
5492 }
5493
5494 void MinorMarkCompactCollector::EvacuateEpilogue() {
5495 heap()->new_space()->set_age_mark(heap()->new_space()->top());
5496 // Give pages that are queued to be freed back to the OS.
5497 heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
5498 }
5499
5500 int MinorMarkCompactCollector::CollectToSpaceUpdatingItems(
5501 std::vector<std::unique_ptr<UpdatingItem>>* items) {
5502 // Seed to space pages.
5503 const Address space_start = heap()->new_space()->first_allocatable_address();
5504 const Address space_end = heap()->new_space()->top();
5505 int pages = 0;
5506 for (Page* page : PageRange(space_start, space_end)) {
5507 Address start =
5508 page->Contains(space_start) ? space_start : page->area_start();
5509 Address end = page->Contains(space_end) ? space_end : page->area_end();
5510 items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
5511 pages++;
5512 }
5513 return pages;
5514 }
5515
5516 std::unique_ptr<UpdatingItem>
5517 MinorMarkCompactCollector::CreateToSpaceUpdatingItem(MemoryChunk* chunk,
5518 Address start,
5519 Address end) {
5520 return std::make_unique<ToSpaceUpdatingItem<NonAtomicMarkingState>>(
5521 heap(), chunk, start, end, non_atomic_marking_state());
5522 }
5523
5524 std::unique_ptr<UpdatingItem>
5525 MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
5526 MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
5527 return std::make_unique<RememberedSetUpdatingItem<
5528 NonAtomicMarkingState, GarbageCollector::MINOR_MARK_COMPACTOR>>(
5529 heap(), non_atomic_marking_state(), chunk, updating_mode);
5530 }
5531
5532 class PageMarkingItem;
5533 class RootMarkingItem;
5534 class YoungGenerationMarkingTask;
5535
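// Per-worker marking state: drains a local worklist and accumulates live
// bytes per page, which are flushed to the marking state at the end.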
5536 class YoungGenerationMarkingTask {
5537 public:
5538   YoungGenerationMarkingTask(
5539 Isolate* isolate, MinorMarkCompactCollector* collector,
5540 MinorMarkCompactCollector::MarkingWorklist* global_worklist)
5541 : marking_worklist_local_(global_worklist),
5542 marking_state_(collector->marking_state()),
5543 visitor_(isolate, marking_state_, &marking_worklist_local_) {
5544 local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
5545 Page::kPageSize);
5546 }
5547
5548   void MarkObject(Object object) {
5549 if (!Heap::InYoungGeneration(object)) return;
5550 HeapObject heap_object = HeapObject::cast(object);
5551 if (marking_state_->WhiteToGrey(heap_object)) {
5552 const int size = visitor_.Visit(heap_object);
5553 IncrementLiveBytes(heap_object, size);
5554 }
5555 }
5556
5557   void EmptyMarkingWorklist() {
5558 HeapObject object;
5559 while (marking_worklist_local_.Pop(&object)) {
5560 const int size = visitor_.Visit(object);
5561 IncrementLiveBytes(object, size);
5562 }
5563 }
5564
5565   void IncrementLiveBytes(HeapObject object, intptr_t bytes) {
5566 local_live_bytes_[Page::FromHeapObject(object)] += bytes;
5567 }
5568
5569   void FlushLiveBytes() {
5570 for (auto pair : local_live_bytes_) {
5571 marking_state_->IncrementLiveBytes(pair.first, pair.second);
5572 }
5573 }
5574
5575 private:
5576 MinorMarkCompactCollector::MarkingWorklist::Local marking_worklist_local_;
5577 MinorMarkCompactCollector::MarkingState* marking_state_;
5578 YoungGenerationMarkingVisitor visitor_;
5579 std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
5580 };
5581
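// Work item that marks young generation objects reachable from the
// OLD_TO_NEW remembered set (untyped and typed slots) of a single chunk.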
5582 class PageMarkingItem : public ParallelWorkItem {
5583 public:
5584   explicit PageMarkingItem(MemoryChunk* chunk) : chunk_(chunk) {}
5585 ~PageMarkingItem() = default;
5586
5587   void Process(YoungGenerationMarkingTask* task) {
5588 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5589 "PageMarkingItem::Process");
5590 base::MutexGuard guard(chunk_->mutex());
5591 MarkUntypedPointers(task);
5592 MarkTypedPointers(task);
5593 }
5594
5595 private:
5596   inline Heap* heap() { return chunk_->heap(); }
5597
5598   void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
5599 InvalidatedSlotsFilter filter = InvalidatedSlotsFilter::OldToNew(chunk_);
5600 RememberedSet<OLD_TO_NEW>::Iterate(
5601 chunk_,
5602 [this, task, &filter](MaybeObjectSlot slot) {
5603 if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
5604 return CheckAndMarkObject(task, slot);
5605 },
5606 SlotSet::FREE_EMPTY_BUCKETS);
5607 }
5608
5609   void MarkTypedPointers(YoungGenerationMarkingTask* task) {
5610 RememberedSet<OLD_TO_NEW>::IterateTyped(
5611 chunk_, [=](SlotType slot_type, Address slot) {
5612 return UpdateTypedSlotHelper::UpdateTypedSlot(
5613 heap(), slot_type, slot, [this, task](FullMaybeObjectSlot slot) {
5614 return CheckAndMarkObject(task, slot);
5615 });
5616 });
5617 }
5618
5619 template <typename TSlot>
5620 V8_INLINE SlotCallbackResult
5621   CheckAndMarkObject(YoungGenerationMarkingTask* task, TSlot slot) {
5622 static_assert(
5623 std::is_same<TSlot, FullMaybeObjectSlot>::value ||
5624 std::is_same<TSlot, MaybeObjectSlot>::value,
5625 "Only FullMaybeObjectSlot and MaybeObjectSlot are expected here");
5626 MaybeObject object = *slot;
5627 if (Heap::InYoungGeneration(object)) {
5628 // Marking happens before flipping the young generation, so the object
5629 // has to be in a to page.
5630 DCHECK(Heap::InToPage(object));
5631 HeapObject heap_object;
5632 bool success = object.GetHeapObject(&heap_object);
5633 USE(success);
5634 DCHECK(success);
5635 task->MarkObject(heap_object);
5636 return KEEP_SLOT;
5637 }
5638 return REMOVE_SLOT;
5639 }
5640
5641 MemoryChunk* chunk_;
5642 };
5643
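// Parallel job that processes PageMarkingItems and drains the global marking
// worklist during minor MC marking.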
5644 class YoungGenerationMarkingJob : public v8::JobTask {
5645 public:
5646   YoungGenerationMarkingJob(
5647 Isolate* isolate, MinorMarkCompactCollector* collector,
5648 MinorMarkCompactCollector::MarkingWorklist* global_worklist,
5649 std::vector<PageMarkingItem> marking_items)
5650 : isolate_(isolate),
5651 collector_(collector),
5652 global_worklist_(global_worklist),
5653 marking_items_(std::move(marking_items)),
5654 remaining_marking_items_(marking_items_.size()),
5655 generator_(marking_items_.size()) {}
5656
5657   void Run(JobDelegate* delegate) override {
5658 if (delegate->IsJoiningThread()) {
5659 TRACE_GC(collector_->heap()->tracer(),
5660 GCTracer::Scope::MINOR_MC_MARK_PARALLEL);
5661 ProcessItems(delegate);
5662 } else {
5663 TRACE_GC_EPOCH(collector_->heap()->tracer(),
5664 GCTracer::Scope::MINOR_MC_BACKGROUND_MARKING,
5665 ThreadKind::kBackground);
5666 ProcessItems(delegate);
5667 }
5668 }
5669
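  // Illustrative back-of-the-envelope (not part of the implementation): with
  // kPagesPerTask == 2, seven remaining items and an empty global worklist,
  // GetMaxConcurrency() requests std::max((7 + 1) / 2, 0) == 4 workers,
  // which is then clamped to kMaxParallelTasks below.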
5670   size_t GetMaxConcurrency(size_t worker_count) const override {
5671 // Pages are not private to markers but we can still use them to estimate
5672 // the amount of marking that is required.
5673 const int kPagesPerTask = 2;
5674 size_t items = remaining_marking_items_.load(std::memory_order_relaxed);
5675 size_t num_tasks =
5676 std::max((items + 1) / kPagesPerTask, global_worklist_->Size());
5677 if (!FLAG_parallel_marking) {
5678 num_tasks = std::min<size_t>(1, num_tasks);
5679 }
5680 return std::min<size_t>(num_tasks,
5681 MinorMarkCompactCollector::kMaxParallelTasks);
5682 }
5683
5684 private:
5685   void ProcessItems(JobDelegate* delegate) {
5686 double marking_time = 0.0;
5687 {
5688 TimedScope scope(&marking_time);
5689 YoungGenerationMarkingTask task(isolate_, collector_, global_worklist_);
5690 ProcessMarkingItems(&task);
5691 task.EmptyMarkingWorklist();
5692 task.FlushLiveBytes();
5693 }
5694 if (FLAG_trace_minor_mc_parallel_marking) {
5695 PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
5696 static_cast<void*>(this), marking_time);
5697 }
5698 }
5699
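  // Each worker obtains a start index from the IndexGenerator and walks the
  // item vector from there, acquiring items until it reaches one that another
  // worker has already claimed. The atomic counter of remaining items lets
  // workers return early once every item has been processed.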
5700   void ProcessMarkingItems(YoungGenerationMarkingTask* task) {
5701 while (remaining_marking_items_.load(std::memory_order_relaxed) > 0) {
5702 base::Optional<size_t> index = generator_.GetNext();
5703 if (!index) return;
5704 for (size_t i = *index; i < marking_items_.size(); ++i) {
5705 auto& work_item = marking_items_[i];
5706 if (!work_item.TryAcquire()) break;
5707 work_item.Process(task);
5708 task->EmptyMarkingWorklist();
5709 if (remaining_marking_items_.fetch_sub(1, std::memory_order_relaxed) <=
5710 1) {
5711 return;
5712 }
5713 }
5714 }
5715 }
5716
5717 Isolate* isolate_;
5718 MinorMarkCompactCollector* collector_;
5719 MinorMarkCompactCollector::MarkingWorklist* global_worklist_;
5720 std::vector<PageMarkingItem> marking_items_;
5721 std::atomic_size_t remaining_marking_items_{0};
5722 IndexGenerator generator_;
5723 };
5724
5725 void MinorMarkCompactCollector::MarkRootSetInParallel(
5726 RootMarkingVisitor* root_visitor) {
5727 {
5728 std::vector<PageMarkingItem> marking_items;
5729
5730 // Seed the root set (roots + old->new set).
5731 {
5732 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
5733 isolate()->global_handles()->IdentifyWeakUnmodifiedObjects(
5734 &JSObject::IsUnmodifiedApiObject);
5735 // MinorMC treats all weak roots except for global handles as strong.
5736 // That is why we don't set skip_weak = true here and instead visit
5737 // global handles separately.
5738 heap()->IterateRoots(
5739 root_visitor, base::EnumSet<SkipRoot>{SkipRoot::kExternalStringTable,
5740 SkipRoot::kGlobalHandles,
5741 SkipRoot::kOldGeneration});
5742 isolate()->global_handles()->IterateYoungStrongAndDependentRoots(
5743 root_visitor);
5744 // Create items for each page.
5745 RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
5746 heap(), [&marking_items](MemoryChunk* chunk) {
5747 marking_items.emplace_back(chunk);
5748 });
5749 }
5750
5751 // Add tasks and run in parallel.
5752 {
5753       // The main thread might hold local items while GlobalPoolSize() == 0.
5754       // Flush to ensure these items are visible globally and picked up by
5755       // the job.
5756 main_thread_worklist_local_.Publish();
5757 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
5758 V8::GetCurrentPlatform()
5759 ->PostJob(v8::TaskPriority::kUserBlocking,
5760 std::make_unique<YoungGenerationMarkingJob>(
5761 isolate(), this, worklist(), std::move(marking_items)))
5762 ->Join();
5763
5764 DCHECK(worklist()->IsEmpty());
5765 DCHECK(main_thread_worklist_local_.IsLocalEmpty());
5766 }
5767 }
5768 }
5769
5770 void MinorMarkCompactCollector::MarkLiveObjects() {
5771 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
5772
5773 PostponeInterruptsScope postpone(isolate());
5774
5775 RootMarkingVisitor root_visitor(this);
5776
5777 MarkRootSetInParallel(&root_visitor);
5778
5779   // Mark the rest on the main thread.
5780 {
5781 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
5782 DrainMarkingWorklist();
5783 }
5784
5785 {
5786 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
5787 isolate()->global_handles()->MarkYoungWeakDeadObjectsPending(
5788 &IsUnmarkedObjectForYoungGeneration);
5789 isolate()->global_handles()->IterateYoungWeakDeadObjectsForFinalizers(
5790 &root_visitor);
5791 isolate()->global_handles()->IterateYoungWeakObjectsForPhantomHandles(
5792 &root_visitor, &IsUnmarkedObjectForYoungGeneration);
5793 DrainMarkingWorklist();
5794 }
5795
5796 if (FLAG_minor_mc_trace_fragmentation) {
5797 TraceFragmentation();
5798 }
5799 }
5800
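// Drains the main thread's local marking worklist: every popped object must
// still be grey and is visited with the main marking visitor, which in turn
// may push newly discovered objects back onto the worklist.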
5801 void MinorMarkCompactCollector::DrainMarkingWorklist() {
5802 PtrComprCageBase cage_base(isolate());
5803 HeapObject object;
5804 while (main_thread_worklist_local_.Pop(&object)) {
5805 DCHECK(!object.IsFreeSpaceOrFiller(cage_base));
5806 DCHECK(object.IsHeapObject());
5807 DCHECK(heap()->Contains(object));
5808 DCHECK(non_atomic_marking_state()->IsGrey(object));
5809 main_marking_visitor()->Visit(object);
5810 }
5811 DCHECK(main_thread_worklist_local_.IsLocalEmpty());
5812 }
5813
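// Reports a free-space histogram for the new-space pages between the first
// allocatable address and the current top. The size classes are cumulative: a
// free gap of, say, 3000 bytes is counted towards the 0, 1K and 2K classes
// but not the 4K class, so free_bytes_of_class[0] is always the total amount
// of free space. The printed line looks roughly like this (values are purely
// illustrative):
//   Minor Mark-Compact Fragmentation: allocatable_bytes=1048576
//   live_bytes=786432 free_bytes=262144 free_bytes_1K=131072
//   free_bytes_2K=65536 free_bytes_4K=16384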
5814 void MinorMarkCompactCollector::TraceFragmentation() {
5815 NewSpace* new_space = heap()->new_space();
5816 PtrComprCageBase cage_base(isolate());
5817 const std::array<size_t, 4> free_size_class_limits = {0, 1024, 2048, 4096};
5818 size_t free_bytes_of_class[free_size_class_limits.size()] = {0};
5819 size_t live_bytes = 0;
5820 size_t allocatable_bytes = 0;
5821 for (Page* p :
5822 PageRange(new_space->first_allocatable_address(), new_space->top())) {
5823 Address free_start = p->area_start();
5824 for (auto object_and_size : LiveObjectRange<kGreyObjects>(
5825 p, non_atomic_marking_state()->bitmap(p))) {
5826 HeapObject const object = object_and_size.first;
5827 Address free_end = object.address();
5828 if (free_end != free_start) {
5829 size_t free_bytes = free_end - free_start;
5830 int free_bytes_index = 0;
5831 for (auto free_size_class_limit : free_size_class_limits) {
5832 if (free_bytes >= free_size_class_limit) {
5833 free_bytes_of_class[free_bytes_index] += free_bytes;
5834 }
5835 free_bytes_index++;
5836 }
5837 }
5838 Map map = object.map(cage_base, kAcquireLoad);
5839 int size = object.SizeFromMap(map);
5840 live_bytes += size;
5841 free_start = free_end + size;
5842 }
5843 size_t area_end =
5844 p->Contains(new_space->top()) ? new_space->top() : p->area_end();
5845 if (free_start != area_end) {
5846 size_t free_bytes = area_end - free_start;
5847 int free_bytes_index = 0;
5848 for (auto free_size_class_limit : free_size_class_limits) {
5849 if (free_bytes >= free_size_class_limit) {
5850 free_bytes_of_class[free_bytes_index] += free_bytes;
5851 }
5852 free_bytes_index++;
5853 }
5854 }
5855 allocatable_bytes += area_end - p->area_start();
5856 CHECK_EQ(allocatable_bytes, live_bytes + free_bytes_of_class[0]);
5857 }
5858 PrintIsolate(
5859 isolate(),
5860 "Minor Mark-Compact Fragmentation: allocatable_bytes=%zu live_bytes=%zu "
5861 "free_bytes=%zu free_bytes_1K=%zu free_bytes_2K=%zu free_bytes_4K=%zu\n",
5862 allocatable_bytes, live_bytes, free_bytes_of_class[0],
5863 free_bytes_of_class[1], free_bytes_of_class[2], free_bytes_of_class[3]);
5864 }
5865
5866 void MinorMarkCompactCollector::Evacuate() {
5867 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
5868 base::MutexGuard guard(heap()->relocation_mutex());
5869
5870 {
5871 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
5872 EvacuatePrologue();
5873 }
5874
5875 {
5876 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
5877 EvacuatePagesInParallel();
5878 }
5879
5880 if (!FLAG_minor_mc_sweeping) UpdatePointersAfterEvacuation();
5881
5882 {
5883 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
5884 if (!heap()->new_space()->Rebalance()) {
5885 heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
5886 }
5887 }
5888
5889 {
5890 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
5891 for (Page* p : new_space_evacuation_pages_) {
5892 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
5893 p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
5894 promoted_pages_.push_back(p);
5895 }
5896 }
5897 new_space_evacuation_pages_.clear();
5898 }
5899
5900 {
5901 TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
5902 EvacuateEpilogue();
5903 }
5904 }
5905
5906 namespace {
5907
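// Evacuator used by the minor collector. It allocates into compaction spaces
// dedicated to MinorMarkCompact, records migrated slots through the full
// collector's visitor, and does not unconditionally promote young pages
// (AlwaysPromoteYoung::kNo).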
5908 class YoungGenerationEvacuator : public Evacuator {
5909 public:
5910   explicit YoungGenerationEvacuator(MinorMarkCompactCollector* collector)
5911 : Evacuator(collector->heap(), &record_visitor_, &local_allocator_,
5912 AlwaysPromoteYoung::kNo),
5913 record_visitor_(collector->heap()->mark_compact_collector()),
5914 local_allocator_(
5915 heap_, CompactionSpaceKind::kCompactionSpaceForMinorMarkCompact),
5916 collector_(collector) {}
5917
5918   GCTracer::Scope::ScopeId GetBackgroundTracingScope() override {
5919 return GCTracer::Scope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
5920 }
5921
5922   GCTracer::Scope::ScopeId GetTracingScope() override {
5923 return GCTracer::Scope::MINOR_MC_EVACUATE_COPY_PARALLEL;
5924 }
5925
5926 protected:
5927 void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
5928
5929 YoungGenerationRecordMigratedSlotVisitor record_visitor_;
5930 EvacuationAllocator local_allocator_;
5931 MinorMarkCompactCollector* collector_;
5932 };
5933
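// Processes the grey objects of a single page according to its evacuation
// mode: kObjectsNewToOld copies objects individually and clears the mark
// bits, while kPageNewToOld and kPageNewToNew keep the mark bits (the page
// itself was moved) and only make the page iterable again where needed.
// kObjectsOldToOld cannot occur in the minor collector.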
5934 void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
5935 intptr_t* live_bytes) {
5936 TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
5937 "YoungGenerationEvacuator::RawEvacuatePage");
5938 MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
5939 collector_->non_atomic_marking_state();
5940 *live_bytes = marking_state->live_bytes(chunk);
5941 switch (ComputeEvacuationMode(chunk)) {
5942 case kObjectsNewToOld:
5943 DCHECK(!FLAG_minor_mc_sweeping);
5944 LiveObjectVisitor::VisitGreyObjectsNoFail(
5945 chunk, marking_state, &new_space_visitor_,
5946 LiveObjectVisitor::kClearMarkbits);
5947 break;
5948 case kPageNewToOld:
5949 LiveObjectVisitor::VisitGreyObjectsNoFail(
5950 chunk, marking_state, &new_to_old_page_visitor_,
5951 LiveObjectVisitor::kKeepMarking);
5952 new_to_old_page_visitor_.account_moved_bytes(
5953 marking_state->live_bytes(chunk));
5954 if (!chunk->IsLargePage()) {
5955 if (heap()->ShouldZapGarbage()) {
5956 collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
5957 } else if (heap()->incremental_marking()->IsMarking()) {
5958           // When incremental marking is on, we need to clear the mark
5959           // bits of the full collector. We cannot yet discard the young
5960           // generation mark bits as they are still needed for pointer updates.
5961 collector_->MakeIterable(static_cast<Page*>(chunk),
5962 IGNORE_FREE_SPACE);
5963 }
5964 }
5965 break;
5966 case kPageNewToNew:
5967 LiveObjectVisitor::VisitGreyObjectsNoFail(
5968 chunk, marking_state, &new_to_new_page_visitor_,
5969 LiveObjectVisitor::kKeepMarking);
5970 new_to_new_page_visitor_.account_moved_bytes(
5971 marking_state->live_bytes(chunk));
5972 DCHECK(!chunk->IsLargePage());
5973 if (heap()->ShouldZapGarbage()) {
5974 collector_->MakeIterable(static_cast<Page*>(chunk), ZAP_FREE_SPACE);
5975 } else if (heap()->incremental_marking()->IsMarking()) {
5976         // When incremental marking is on, we need to clear the mark bits
5977         // of the full collector. We cannot yet discard the young generation
5978         // mark bits as they are still needed for pointer updates.
5979 collector_->MakeIterable(static_cast<Page*>(chunk), IGNORE_FREE_SPACE);
5980 }
5981 break;
5982 case kObjectsOldToOld:
5983 UNREACHABLE();
5984 }
5985 }
5986
5987 } // namespace
5988
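// Decides, per page, between moving the page wholesale and copying its
// objects individually: pages are moved when FLAG_minor_mc_sweeping is set or
// when ShouldMovePage() approves, with pages below the age mark being
// promoted to old space (NEW_TO_OLD) and the rest staying in new space
// (NEW_TO_NEW). Live new-space large objects are always promoted to the old
// large-object space.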
5989 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
5990 std::vector<std::pair<ParallelWorkItem, MemoryChunk*>> evacuation_items;
5991 intptr_t live_bytes = 0;
5992
5993 for (Page* page : new_space_evacuation_pages_) {
5994 intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
5995 if (live_bytes_on_page == 0) continue;
5996 live_bytes += live_bytes_on_page;
5997 if (FLAG_minor_mc_sweeping ||
5998 ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
5999 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
6000 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
6001 } else {
6002 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
6003 }
6004 }
6005 evacuation_items.emplace_back(ParallelWorkItem{}, page);
6006 }
6007
6008 // Promote young generation large objects.
6009 for (auto it = heap()->new_lo_space()->begin();
6010 it != heap()->new_lo_space()->end();) {
6011 LargePage* current = *it;
6012 it++;
6013 HeapObject object = current->GetObject();
6014 DCHECK(!non_atomic_marking_state_.IsBlack(object));
6015 if (non_atomic_marking_state_.IsGrey(object)) {
6016 heap_->lo_space()->PromoteNewLargeObject(current);
6017 current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
6018 promoted_large_pages_.push_back(current);
6019 evacuation_items.emplace_back(ParallelWorkItem{}, current);
6020 }
6021 }
6022 if (evacuation_items.empty()) return;
6023
6024 YoungGenerationMigrationObserver observer(heap(),
6025 heap()->mark_compact_collector());
6026 const auto pages_count = evacuation_items.size();
6027 const auto wanted_num_tasks =
6028 CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
6029 this, std::move(evacuation_items), &observer);
6030
6031 if (FLAG_trace_evacuation) {
6032 TraceEvacuation(isolate(), pages_count, wanted_num_tasks, live_bytes, 0);
6033 }
6034 }
6035
6036 } // namespace internal
6037 } // namespace v8
6038