// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/codegen/compilation-cache.h"
#include "src/execution/vm-state-inl.h"
#include "src/handles/global-handles.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking-barrier.h"
#include "src/heap/marking-visitor-inl.h"
#include "src/heap/marking-visitor.h"
#include "src/heap/memory-chunk.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/safepoint.h"
#include "src/init/v8.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/numbers/conversions.h"
#include "src/objects/data-handler-inl.h"
#include "src/objects/embedder-data-array-inl.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/objects/transitions-inl.h"
#include "src/objects/visitors.h"
#include "src/tracing/trace-event.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

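// Allocation observer hook: each observed allocation advances incremental
// marking and, since that advance may itself start marking, also makes sure
// the freshly allocated memory is treated as black when black allocation is
// active.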
void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
                                        size_t size) {
  Heap* heap = incremental_marking_->heap();
  VMState<GC> state(heap->isolate());
  RCS_SCOPE(heap->isolate(),
            RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
  incremental_marking_->AdvanceOnAllocation();
  // AdvanceOnAllocation can start incremental marking.
  incremental_marking_->EnsureBlackAllocated(addr, size);
}

IncrementalMarking::IncrementalMarking(Heap* heap, WeakObjects* weak_objects)
    : heap_(heap),
      collector_(heap->mark_compact_collector()),
      weak_objects_(weak_objects),
      new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
      old_generation_observer_(this, kOldGenerationAllocatedThreshold),
      marking_state_(heap->isolate()),
      atomic_marking_state_(heap->isolate()),
      non_atomic_marking_state_(heap->isolate()) {
  SetState(STOPPED);
}

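// Marks |obj| and visits it right away so that all of its current fields are
// traced before the caller changes the object's layout.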
void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
    HeapObject obj) {
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
  marking_state()->WhiteToGrey(obj);
  collector_->VisitObject(obj);
}

void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
  MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
  Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
  MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
  IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
}

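// An array is being left-trimmed: |from| is the old object start and |to| the
// new one on the same page. Transfer the marking state so that |to| ends up
// black.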
void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
  DCHECK(IsMarking());
  DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
  DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
  DCHECK_NE(from, to);

  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);

  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
    // Nothing to do if the object is in a black area.
    return;
  }
  MarkBlackAndVisitObjectDueToLayoutChange(from);
  DCHECK(marking_state()->IsBlack(from));
  // Mark the new address as black.
  if (from.address() + kTaggedSize == to.address()) {
    // The old and the new markbits overlap. The |to| object is currently grey.
    // To make it black, we need to set the second bit.
    DCHECK(new_mark_bit.Get<kAtomicity>());
    new_mark_bit.Next().Set<kAtomicity>();
  } else {
    bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
    DCHECK(success);
    USE(success);
  }
  DCHECK(marking_state()->IsBlack(to));
}

bool IncrementalMarking::WasActivated() { return was_activated_; }

bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state:
  // 1) when incremental marking is turned on,
  // 2) when we are currently not in a GC,
  // 3) when we are currently not serializing or deserializing the heap, and
  // 4) when the heap is not shared.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled() && !heap_->IsShared();
}

bool IncrementalMarking::IsBelowActivationThresholds() const {
  return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
         heap_->EmbedderSizeOfObjects() <= kEmbedderActivationThreshold;
}

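// Starts incremental marking: records the initial heap statistics used by the
// marking schedule, sets up marking via StartMarking(), and installs the
// allocation observers that drive subsequent steps.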
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  DCHECK(!collector_->sweeping_in_progress());
  DCHECK(!heap_->IsShared());

  if (FLAG_trace_incremental_marking) {
    const size_t old_generation_size_mb =
        heap()->OldGenerationSizeOfObjects() / MB;
    const size_t old_generation_limit_mb =
        heap()->old_generation_allocation_limit() / MB;
    const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
    const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
        "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        old_generation_size_mb > old_generation_limit_mb
            ? 0
            : old_generation_limit_mb - old_generation_size_mb,
        global_size_mb, global_limit_mb,
        global_size_mb > global_limit_mb ? 0
                                         : global_limit_mb - global_size_mb);
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  NestedTimedHistogramScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT1(
      "v8", "V8.GCIncrementalMarkingStart", "epoch",
      heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL_START));
  TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START,
                 ThreadKind::kMain);
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  time_to_force_completion_ = 0.0;
  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_marked_ = 0;
  scheduled_bytes_to_mark_ = 0;
  schedule_update_time_ms_ = start_time_ms_;
  bytes_marked_concurrently_ = 0;
  was_activated_ = true;

  StartMarking();

  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
                                           &new_generation_observer_);
  incremental_marking_job()->Start(heap_);
}

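// Root visitor that greys unmarked objects and pushes them onto the marking
// worklist. Objects living in the shared heap are skipped.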
class IncrementalMarkingRootMarkingVisitor final : public RootVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(Heap* heap)
      : heap_(heap), incremental_marking_(heap->incremental_marking()) {}

  void VisitRootPointer(Root root, const char* description,
                        FullObjectSlot p) override {
    DCHECK(!MapWord::IsPacked((*p).ptr()));
    MarkObjectByPointer(root, p);
  }

  void VisitRootPointers(Root root, const char* description,
                         FullObjectSlot start, FullObjectSlot end) override {
    for (FullObjectSlot p = start; p < end; ++p) {
      DCHECK(!MapWord::IsPacked((*p).ptr()));
      MarkObjectByPointer(root, p);
    }
  }

 private:
  void MarkObjectByPointer(Root root, FullObjectSlot p) {
    Object object = *p;
    if (!object.IsHeapObject()) return;
    DCHECK(!MapWord::IsPacked(object.ptr()));
    HeapObject heap_object = HeapObject::cast(object);

    if (heap_object.InSharedHeap()) return;

    if (incremental_marking_->WhiteToGreyAndPush(heap_object)) {
      if (V8_UNLIKELY(FLAG_track_retaining_path)) {
        heap_->AddRetainingRoot(root, heap_object);
      }
    }
  }

  Heap* const heap_;
  IncrementalMarking* const incremental_marking_;
};

namespace {

void MarkRoots(Heap* heap) {
  IncrementalMarkingRootMarkingVisitor visitor(heap);
  heap->IterateRoots(
      &visitor,
      base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kMainThreadHandles,
                              SkipRoot::kWeak});
}

}  // namespace

void IncrementalMarking::MarkRootsForTesting() { MarkRoots(heap_); }

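// Sets up the actual marking phase: optionally starts compaction, activates
// marking barriers and black allocation, marks the roots, and schedules the
// concurrent marking job if enabled.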
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  heap_->InvokeIncrementalMarkingPrologueCallbacks();

  is_compacting_ = collector_->StartCompaction(
      MarkCompactCollector::StartCompactionMode::kIncremental);

  auto embedder_flags = heap_->flags_for_embedder_tracer();
  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    // PrepareForTrace should be called before visitor initialization in
    // StartMarking. It is only used with CppHeap.
    heap_->local_embedder_heap_tracer()->PrepareForTrace(embedder_flags);
  }

  collector_->StartMarking();

  SetState(MARKING);

  MarkingBarrier::ActivateAll(heap(), is_compacting_);
  GlobalHandles::EnableMarkingBarrier(heap()->isolate());

  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  StartBlackAllocation();

  MarkRoots(heap_);

  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
    heap_->concurrent_marking()->ScheduleJob();
  }

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }

  {
    // TracePrologue may call back into V8 in corner cases, requiring that
    // marking (including write barriers) is fully set up.
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue(embedder_flags);
  }

  heap_->InvokeIncrementalMarkingEpilogueCallbacks();
}

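// With black allocation enabled, objects allocated in the old, map, and code
// space linear allocation areas are marked black up front, so the marker does
// not need to visit them again.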
void IncrementalMarking::StartBlackAllocation() {
  DCHECK(!black_allocation_);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkLinearAllocationAreaBlack();
  if (heap()->map_space()) heap()->map_space()->MarkLinearAllocationAreaBlack();
  heap()->code_space()->MarkLinearAllocationAreaBlack();
  heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
    local_heap->MarkLinearAllocationAreaBlack();
  });
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::PauseBlackAllocation() {
  DCHECK(IsMarking());
  heap()->old_space()->UnmarkLinearAllocationArea();
  if (heap()->map_space()) heap()->map_space()->UnmarkLinearAllocationArea();
  heap()->code_space()->UnmarkLinearAllocationArea();
  heap()->safepoint()->IterateLocalHeaps(
      [](LocalHeap* local_heap) { local_heap->UnmarkLinearAllocationArea(); });
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation paused\n");
  }
  black_allocation_ = false;
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

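// Makes sure a just-allocated old-generation object is black while black
// allocation is active: large objects are marked directly, regular objects
// get a black area created on their page.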
void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
  if (black_allocation() && allocated != kNullAddress) {
    HeapObject object = HeapObject::FromAddress(allocated);
    if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
      if (heap_->IsLargeObject(object)) {
        marking_state()->WhiteToBlack(object);
      } else {
        Page::FromAddress(allocated)->CreateBlackArea(allocated,
                                                      allocated + size);
      }
    }
  }
}

bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object constructor = map.GetConstructor();
  if (!constructor.IsHeapObject() ||
      marking_state()->IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}

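// Artificially keeps recently used maps alive by greying them, ageing each map
// so that ones whose prototype is no longer reachable eventually stop being
// retained (see FLAG_retain_maps_for_n_gc).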
void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  std::vector<WeakArrayList> retained_maps_list = heap()->FindAllRetainedMaps();

  for (WeakArrayList retained_maps : retained_maps_list) {
    int length = retained_maps.length();

    for (int i = 0; i < length; i += 2) {
      MaybeObject value = retained_maps.Get(i);
      HeapObject map_heap_object;
      if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
        continue;
      }
      int age = retained_maps.Get(i + 1).ToSmi().value();
      int new_age;
      Map map = Map::cast(map_heap_object);
      if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
        if (ShouldRetainMap(map, age)) {
          WhiteToGreyAndPush(map);
          if (V8_UNLIKELY(FLAG_track_retaining_path)) {
            heap_->AddRetainingRoot(Root::kRetainMaps, map);
          }
        }
        Object prototype = map.prototype();
        if (age > 0 && prototype.IsHeapObject() &&
            marking_state()->IsWhite(HeapObject::cast(prototype))) {
          // The prototype is not marked, age the map.
          new_age = age - 1;
        } else {
          // The prototype and the constructor are marked, so this map keeps
          // only the transition tree alive, not JSObjects. Do not age the map.
          new_age = age;
        }
      } else {
        new_age = FLAG_retain_maps_for_n_gc;
      }
      // Update the age of the map.
      if (new_age != age) {
        retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
      }
    }
  }
}

void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  // Map retaining is needed for performance, not correctness,
  // so we can do it only once at the beginning of the finalization.
  RetainMaps();

  finalize_marking_completed_ = true;

  if (FLAG_trace_incremental_marking) {
    double end = heap_->MonotonicallyIncreasingTimeInMs();
    double delta = end - start;
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
  }
}

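// After a young-generation GC, rewrites the marking worklist: entries pointing
// into from-space are forwarded to the objects' new locations, while entries
// for dead objects, shared-heap promotions, and fillers are dropped.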
void IncrementalMarking::UpdateMarkingWorklistAfterYoungGenGC() {
  if (!IsMarking()) return;

  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();

  MinorMarkCompactCollector::MarkingState* minor_marking_state =
      heap()->minor_mark_compact_collector()->marking_state();

  collector_->local_marking_worklists()->Publish();
  MarkingBarrier::PublishAll(heap());
  PtrComprCageBase cage_base(heap_->isolate());
  collector_->marking_worklists()->Update([
#ifdef DEBUG
                                              // `this` is only referenced
                                              // inside the DCHECKs below.
                                              this,
#endif
                                              minor_marking_state, cage_base,
                                              filler_map](
                                              HeapObject obj,
                                              HeapObject* out) -> bool {
    DCHECK(obj.IsHeapObject());
    // Only pointers to from space have to be updated.
    if (Heap::InFromPage(obj)) {
      DCHECK_IMPLIES(FLAG_minor_mc_sweeping, minor_marking_state->IsWhite(obj));
      MapWord map_word = obj.map_word(cage_base, kRelaxedLoad);
      DCHECK_IMPLIES(FLAG_minor_mc_sweeping, !map_word.IsForwardingAddress());
      if (!map_word.IsForwardingAddress()) {
        // There may be objects on the marking deque that do not exist
        // anymore, e.g. left-trimmed objects or objects from the root set
        // (frames). If these objects are dead at scavenging time, their
        // marking deque entries will not point to forwarding addresses.
        // Hence, we can discard them.
        return false;
      }
      HeapObject dest = map_word.ToForwardingAddress();
      DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj.IsFreeSpaceOrFiller());
      if (dest.InSharedHeap()) {
        // The object got promoted into the shared heap. Drop it from the
        // client heap marking worklist.
        return false;
      }
      *out = dest;
      return true;
    } else if (Heap::InToPage(obj)) {
      // The object may be on a large page or on a page that was moved in
      // new space.
      DCHECK(Heap::IsLargeObject(obj) || Page::FromHeapObject(obj)->IsFlagSet(
                                             Page::PAGE_NEW_NEW_PROMOTION));
      if (minor_marking_state->IsWhite(obj)) {
        return false;
      }
      // Either a large object or an object marked by the minor
      // mark-compactor.
      *out = obj;
      return true;
    } else {
      // The object may be on a page that was moved from new to old space.
      // Only applicable during minor MC garbage collections.
      if (!Heap::IsLargeObject(obj) &&
          Page::FromHeapObject(obj)->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        if (minor_marking_state->IsWhite(obj)) {
          return false;
        }
        *out = obj;
        return true;
      }
      DCHECK_IMPLIES(marking_state()->IsWhite(obj),
                     obj.IsFreeSpaceOrFiller(cage_base));
      // Skip one-word filler objects that appear on the
      // stack when we perform in-place array shifts.
      if (obj.map(cage_base) != filler_map) {
        *out = obj;
        return true;
      }
      return false;
    }
  });

  collector_->local_weak_objects()->Publish();
  weak_objects_->UpdateAfterScavenge();
}

void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
    size_t dead_bytes_in_new_space) {
  if (!IsMarking()) return;
  bytes_marked_ -= std::min(bytes_marked_, dead_bytes_in_new_space);
}

void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
  if (IsMarking() && marking_state()->IsBlack(obj)) {
    collector_->RevisitObject(obj);
  }
}

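// Hands wrapper objects over to the embedder and lets it trace for roughly
// |expected_duration_ms|. Returns whether embedder-side marking work remains.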
StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
                                            double* duration_ms) {
  if (!ShouldDoEmbedderStep()) {
    *duration_ms = 0.0;
    return StepResult::kNoImmediateWork;
  }

  constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;

  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
  LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
  const double start = heap_->MonotonicallyIncreasingTimeInMs();
  const double deadline = start + expected_duration_ms;
  bool empty_worklist = true;
  if (local_marking_worklists()->PublishWrapper()) {
    DCHECK(local_marking_worklists()->IsWrapperEmpty());
  } else {
    // Cannot directly publish wrapper objects.
    LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
    HeapObject object;
    size_t cnt = 0;
    while (local_marking_worklists()->PopWrapper(&object)) {
      scope.TracePossibleWrapper(JSObject::cast(object));
      if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
        if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
          empty_worklist = false;
          break;
        }
        cnt = 0;
      }
    }
  }
  // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
  // which means |local_tracer| won't do any actual tracing, so there is no
  // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
  bool remote_tracing_done =
      local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
  double current = heap_->MonotonicallyIncreasingTimeInMs();
  local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
  *duration_ms = current - start;
  return (empty_worklist && remote_tracing_done)
             ? StepResult::kNoImmediateWork
             : StepResult::kMoreWorkRemaining;
}

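// Stops incremental marking if it is running: removes the allocation
// observers, clears a pending GC interrupt, and folds the live-byte counts
// accumulated by background threads into the main marking state.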
bool IncrementalMarking::Stop() {
  if (IsStopped()) return false;

  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        std::max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  for (SpaceIterator it(heap_); it.HasNext();) {
    Space* space = it.Next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  heap_->isolate()->stack_guard()->ClearGC();
  SetState(STOPPED);
  is_compacting_ = false;
  FinishBlackAllocation();

  // Merge the live bytes counters of background threads.
  for (const auto& pair : background_live_bytes_) {
    MemoryChunk* memory_chunk = pair.first;
    intptr_t live_bytes = pair.second;
    if (live_bytes) {
      marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
    }
  }
  background_live_bytes_.clear();

  return true;
}

void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = GCRequestType::FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

double IncrementalMarking::CurrentTimeToMarkingTask() const {
  const double recorded_time_to_marking_task =
      heap_->tracer()->AverageTimeToIncrementalMarkingTask();
  const double current_time_to_marking_task =
      incremental_marking_job_.CurrentTimeToTask(heap_);
  if (recorded_time_to_marking_task == 0.0) return 0.0;
  return std::max(recorded_time_to_marking_task, current_time_to_marking_task);
}

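// Marking is done. For GC_VIA_STACK_GUARD, completion may be delayed by a
// bounded overshoot so that an already scheduled marking task can finish the
// GC instead of forcing it from an allocation site.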
void IncrementalMarking::MarkingComplete(CompletionAction action) {
  // Allowed overshoot percentage of incremental marking wall time.
  constexpr double kAllowedOvershoot = 0.1;
  // Minimum overshoot in ms. This is used to allow moving away from the stack
  // when marking was fast.
  constexpr double kMinOvershootMs = 50;

  if (action == GC_VIA_STACK_GUARD) {
    if (time_to_force_completion_ == 0.0) {
      const double now = heap_->MonotonicallyIncreasingTimeInMs();
      const double overshoot_ms =
          std::max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
      const double time_to_marking_task = CurrentTimeToMarkingTask();
      if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Not delaying marking completion. time to "
              "task: %fms allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
      } else {
        time_to_force_completion_ = now + overshoot_ms;
        if (FLAG_trace_incremental_marking) {
          heap()->isolate()->PrintWithTimestamp(
              "[IncrementalMarking] Delaying GC via stack guard. time to task: "
              "%fms "
              "allowed overshoot: %fms\n",
              time_to_marking_task, overshoot_ms);
        }
        incremental_marking_job_.ScheduleTask(
            heap(), IncrementalMarkingJob::TaskType::kNormal);
        return;
      }
    }
    if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Delaying GC via stack guard. time left: "
            "%fms\n",
            time_to_force_completion_ -
                heap_->MonotonicallyIncreasingTimeInMs());
      }
      return;
    }
  }

  SetState(COMPLETE);
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and then,
  // that shouldn't make us do a scavenge and keep being incremental.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = GCRequestType::COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}

void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
}

bool IncrementalMarking::ShouldDoEmbedderStep() {
  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
         heap_->local_embedder_heap_tracer()->InUse();
}

void IncrementalMarking::FastForwardSchedule() {
  if (scheduled_bytes_to_mark_ < bytes_marked_) {
    scheduled_bytes_to_mark_ = bytes_marked_;
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Fast-forwarded schedule\n");
    }
  }
}

void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
  // Consider marking close to finalization if 75% of the initial old
  // generation was marked.
  if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
    FastForwardSchedule();
  }
}

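// Schedules marking work proportionally to the wall time that has passed, so
// that the initial old generation is fully scheduled after roughly
// kTargetMarkingWallTimeInMs.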
void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
  // Time interval that should be sufficient to complete incremental marking.
  constexpr double kTargetMarkingWallTimeInMs = 500;
  constexpr double kMinTimeBetweenScheduleInMs = 10;
  if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
  double delta_ms =
      std::min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
  schedule_update_time_ms_ = time_ms;

  size_t bytes_to_mark =
      (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
        "%.1fms\n",
        bytes_to_mark / KB, delta_ms);
  }
}

namespace {
StepResult CombineStepResults(StepResult a, StepResult b) {
  DCHECK_NE(StepResult::kWaitingForFinalization, a);
  DCHECK_NE(StepResult::kWaitingForFinalization, b);
  if (a == StepResult::kMoreWorkRemaining ||
      b == StepResult::kMoreWorkRemaining)
    return StepResult::kMoreWorkRemaining;
  return StepResult::kNoImmediateWork;
}
}  // anonymous namespace

StepResult IncrementalMarking::AdvanceWithDeadline(
    double deadline_in_ms, CompletionAction completion_action,
    StepOrigin step_origin) {
  NestedTimedHistogramScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT1("v8", "V8.GCIncrementalMarking", "epoch",
               heap_->tracer()->CurrentEpoch(GCTracer::Scope::MC_INCREMENTAL));
  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
                 ThreadKind::kMain);
  DCHECK(!IsStopped());

  ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
  FastForwardScheduleIfCloseToFinalization();
  return Step(kStepSizeInMs, completion_action, step_origin);
}

size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Return the bytes allocated in the old generation since the last call and
  // update old_generation_allocation_counter_.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  size_t result = current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return result;
}

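// Picks a step size that aims at finishing marking in about kTargetStepCount
// steps, switching to the more aggressive kTargetStepCountAtOOM target when
// the old generation is close to running out of space.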
size_t IncrementalMarking::StepSizeToMakeProgress() {
  const size_t kTargetStepCount = 256;
  const size_t kTargetStepCountAtOOM = 32;
  const size_t kMaxStepSizeInByte = 256 * KB;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (!heap()->CanExpandOldGeneration(oom_slack)) {
    return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
  }

  return std::min(std::max({initial_old_generation_size_ / kTargetStepCount,
                            IncrementalMarking::kMinStepSizeInBytes}),
                  kMaxStepSizeInByte);
}

void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
  if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
    // The overflow case.
    scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
  } else {
    scheduled_bytes_to_mark_ += bytes_to_mark;
  }
}

void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
  size_t progress_bytes = StepSizeToMakeProgress();
  size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
  size_t bytes_to_mark = progress_bytes + allocation_bytes;
  AddScheduledBytesToMark(bytes_to_mark);

  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
        "(progress=%zuKB, allocation=%zuKB)\n",
        bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
  }
}

void IncrementalMarking::FetchBytesMarkedConcurrently() {
  if (FLAG_concurrent_marking) {
    size_t current_bytes_marked_concurrently =
        heap()->concurrent_marking()->TotalMarkedBytes();
    // concurrent_marking()->TotalMarkedBytes() is not monotonic for a
    // short period of time when a concurrent marking task is finishing.
    if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
      bytes_marked_ +=
          current_bytes_marked_concurrently - bytes_marked_concurrently_;
      bytes_marked_concurrently_ = current_bytes_marked_concurrently;
    }
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marked %zuKB on background threads\n",
          heap_->concurrent_marking()->TotalMarkedBytes() / KB);
    }
  }
}

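// Returns how many bytes the main thread should mark to catch up with the
// schedule, after crediting the bytes already marked by concurrent markers.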
size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
  FetchBytesMarkedConcurrently();
  if (FLAG_trace_incremental_marking) {
    if (scheduled_bytes_to_mark_ > bytes_marked_) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB behind schedule\n",
          (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
    } else {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
          (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
    }
  }
  // Allow steps on allocation to get behind the schedule by a small amount.
  // This gives higher priority to steps in tasks.
  size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
  if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
    return 0;
  return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
}

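// Called from the allocation observers: performs a bounded marking step unless
// we are already in a GC or allocation must not trigger GC work.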
void IncrementalMarking::AdvanceOnAllocation() {
  // Code using an AlwaysAllocateScope assumes that the GC state does not
  // change; that implies that no marking steps must be performed.
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      state_ != MARKING || heap_->always_allocate()) {
    return;
  }
  NestedTimedHistogramScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC_EPOCH(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL,
                 ThreadKind::kMain);
  ScheduleBytesToMarkBasedOnAllocation();
  Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
}

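// A single incremental marking step: merges on-hold objects, computes a byte
// budget from the schedule, drains the V8 marking worklist, gives the embedder
// a proportional time slice, and decides whether to finalize or complete.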
StepResult IncrementalMarking::Step(double max_step_size_in_ms,
                                    CompletionAction action,
                                    StepOrigin step_origin) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  StepResult combined_result = StepResult::kMoreWorkRemaining;
  size_t bytes_to_process = 0;
  size_t v8_bytes_processed = 0;
  double embedder_duration = 0.0;
  double embedder_deadline = 0.0;
  if (state_ == MARKING) {
    if (FLAG_concurrent_marking) {
      // It is safe to merge back all objects that were on hold to the shared
      // work list at Step because we are at a safepoint where all objects
      // are properly initialized.
      local_marking_worklists()->MergeOnHold();
    }

    // Only print marking worklist in debug mode to save ~40KB of code size.
#ifdef DEBUG
    if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
        FLAG_trace_gc_verbose) {
      collector_->marking_worklists()->Print();
    }
#endif
    if (FLAG_trace_incremental_marking) {
      heap_->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Marking speed %.fKB/ms\n",
          heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    }
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    const double marking_speed =
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        max_step_size_in_ms, marking_speed);
    bytes_to_process =
        std::min(ComputeStepSizeInBytes(step_origin), max_step_size);
    bytes_to_process = std::max({bytes_to_process, kMinStepSizeInBytes});

    // Perform a single V8 and a single embedder step. In case both have been
    // observed as empty back to back, we can finalize.
    //
    // This ignores the case where the embedder finds new V8-side objects. The
    // assumption is that large graphs are well connected and can mostly be
    // processed on their own. For small graphs, helping is not necessary.
    std::tie(v8_bytes_processed, std::ignore) =
        collector_->ProcessMarkingWorklist(bytes_to_process);
    StepResult v8_result = local_marking_worklists()->IsEmpty()
                               ? StepResult::kNoImmediateWork
                               : StepResult::kMoreWorkRemaining;
    StepResult embedder_result = StepResult::kNoImmediateWork;
    if (heap_->local_embedder_heap_tracer()->InUse()) {
      embedder_deadline =
          std::min(max_step_size_in_ms,
                   static_cast<double>(bytes_to_process) / marking_speed);
      // TODO(chromium:1056170): Replace embedder_deadline with
      // bytes_to_process after migrating blink to the cppgc library and after
      // v8 can directly push objects to Oilpan.
      embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
    }
    bytes_marked_ += v8_bytes_processed;
    combined_result = CombineStepResults(v8_result, embedder_result);

    if (combined_result == StepResult::kNoImmediateWork) {
      if (!finalize_marking_completed_) {
        FinalizeMarking(action);
        FastForwardSchedule();
        combined_result = StepResult::kWaitingForFinalization;
        incremental_marking_job()->Start(heap_);
      } else {
        MarkingComplete(action);
        combined_result = StepResult::kWaitingForFinalization;
      }
    }
    if (FLAG_concurrent_marking) {
      local_marking_worklists()->ShareWork();
      heap_->concurrent_marking()->RescheduleJobIfNeeded();
    }
  }
  if (state_ == MARKING) {
    // Note that we do not report any marked bytes in case we are finishing
    // sweeping, as we did not process the marking worklist in that case.
    const double v8_duration =
        heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
    heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
  }
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
        "in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
        v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
        embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
  }
  return combined_result;
}

}  // namespace internal
}  // namespace v8