1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/incremental-marking.h"
6 
7 #include "src/codegen/compilation-cache.h"
8 #include "src/execution/vm-state-inl.h"
9 #include "src/heap/array-buffer-sweeper.h"
10 #include "src/heap/concurrent-marking.h"
11 #include "src/heap/embedder-tracing.h"
12 #include "src/heap/gc-idle-time-handler.h"
13 #include "src/heap/gc-tracer.h"
14 #include "src/heap/heap-inl.h"
15 #include "src/heap/incremental-marking-inl.h"
16 #include "src/heap/mark-compact-inl.h"
17 #include "src/heap/marking-barrier.h"
18 #include "src/heap/marking-visitor-inl.h"
19 #include "src/heap/marking-visitor.h"
20 #include "src/heap/memory-chunk.h"
21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting-inl.h"
23 #include "src/heap/objects-visiting.h"
24 #include "src/heap/safepoint.h"
25 #include "src/heap/sweeper.h"
26 #include "src/init/v8.h"
27 #include "src/numbers/conversions.h"
28 #include "src/objects/data-handler-inl.h"
29 #include "src/objects/embedder-data-array-inl.h"
30 #include "src/objects/hash-table-inl.h"
31 #include "src/objects/slots-inl.h"
32 #include "src/objects/transitions-inl.h"
33 #include "src/objects/visitors.h"
34 #include "src/tracing/trace-event.h"
35 #include "src/utils/utils.h"
36 
37 namespace v8 {
38 namespace internal {
39 
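// Allocation observer hook: advances incremental marking on the main thread
// and, since that call can itself start marking, keeps the freshly allocated
// region black when black allocation is active.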
40 void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
41                                         size_t size) {
42   Heap* heap = incremental_marking_->heap();
43   VMState<GC> state(heap->isolate());
44   RuntimeCallTimerScope runtime_timer(
45       heap->isolate(),
46       RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
47   incremental_marking_->AdvanceOnAllocation();
48   // AdvanceOnAllocation can start incremental marking.
49   incremental_marking_->EnsureBlackAllocated(addr, size);
50 }
51 
52 IncrementalMarking::IncrementalMarking(Heap* heap,
53                                        WeakObjects* weak_objects)
54     : heap_(heap),
55       collector_(heap->mark_compact_collector()),
56       weak_objects_(weak_objects),
57       new_generation_observer_(this, kYoungGenerationAllocatedThreshold),
58       old_generation_observer_(this, kOldGenerationAllocatedThreshold) {
59   SetState(STOPPED);
60 }
61 
62 void IncrementalMarking::MarkBlackAndVisitObjectDueToLayoutChange(
63     HeapObject obj) {
64   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingLayoutChange");
65   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_LAYOUT_CHANGE);
66   marking_state()->WhiteToGrey(obj);
67   collector_->VisitObject(obj);
68 }
69 
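// Marks |obj| black from a background thread and accounts its size in the
// live-bytes counter of the owning chunk.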
70 void IncrementalMarking::MarkBlackBackground(HeapObject obj, int object_size) {
71   MarkBit mark_bit = atomic_marking_state()->MarkBitFrom(obj);
72   Marking::MarkBlack<AccessMode::ATOMIC>(mark_bit);
73   MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
74   IncrementLiveBytesBackground(chunk, static_cast<intptr_t>(object_size));
75 }
76 
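// Left trimming replaces the live object |from| with |to|, which starts at a
// higher address in the same chunk. The mark bits have to follow: |from| is
// marked and visited, and |to| is then marked black as well.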
77 void IncrementalMarking::NotifyLeftTrimming(HeapObject from, HeapObject to) {
78   DCHECK(IsMarking());
79   DCHECK(MemoryChunk::FromHeapObject(from)->SweepingDone());
80   DCHECK_EQ(MemoryChunk::FromHeapObject(from), MemoryChunk::FromHeapObject(to));
81   DCHECK_NE(from, to);
82 
83   MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
84 
85   if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
86     // Nothing to do if the object is in a black area.
87     return;
88   }
89   MarkBlackAndVisitObjectDueToLayoutChange(from);
90   DCHECK(marking_state()->IsBlack(from));
91   // Mark the new address as black.
92   if (from.address() + kTaggedSize == to.address()) {
93     // The old and the new markbits overlap. The |to| object has the
94     // grey color. To make it black, we need to set the second bit.
95     DCHECK(new_mark_bit.Get<kAtomicity>());
96     new_mark_bit.Next().Set<kAtomicity>();
97   } else {
98     bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
99     DCHECK(success);
100     USE(success);
101   }
102   DCHECK(marking_state()->IsBlack(to));
103 }
104 
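// Root visitor used by MarkRoots(): every heap object reachable from a root
// slot is turned grey and pushed onto the marking worklist.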
105 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
106  public:
107   explicit IncrementalMarkingRootMarkingVisitor(
108       IncrementalMarking* incremental_marking)
109       : heap_(incremental_marking->heap()) {}
110 
111   void VisitRootPointer(Root root, const char* description,
112                         FullObjectSlot p) override {
113     MarkObjectByPointer(p);
114   }
115 
116   void VisitRootPointers(Root root, const char* description,
117                          FullObjectSlot start, FullObjectSlot end) override {
118     for (FullObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
119   }
120 
121  private:
122   void MarkObjectByPointer(FullObjectSlot p) {
123     Object obj = *p;
124     if (!obj.IsHeapObject()) return;
125 
126     heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
127   }
128 
129   Heap* heap_;
130 };
131 
132 
133 bool IncrementalMarking::WasActivated() { return was_activated_; }
134 
135 
136 bool IncrementalMarking::CanBeActivated() {
137   // Only start incremental marking in a safe state: 1) when incremental
138   // marking is turned on, 2) when we are currently not in a GC, and
139   // 3) when we are currently not serializing or deserializing the heap.
140   return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
141          heap_->deserialization_complete() &&
142          !heap_->isolate()->serializer_enabled();
143 }
144 
145 bool IncrementalMarking::IsBelowActivationThresholds() const {
146   return heap_->OldGenerationSizeOfObjects() <= kV8ActivationThreshold &&
147          heap_->GlobalSizeOfObjects() <= kGlobalActivationThreshold;
148 }
149 
150 void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
151   if (FLAG_trace_incremental_marking) {
152     const size_t old_generation_size_mb =
153         heap()->OldGenerationSizeOfObjects() / MB;
154     const size_t old_generation_limit_mb =
155         heap()->old_generation_allocation_limit() / MB;
156     const size_t global_size_mb = heap()->GlobalSizeOfObjects() / MB;
157     const size_t global_limit_mb = heap()->global_allocation_limit() / MB;
158     heap()->isolate()->PrintWithTimestamp(
159         "[IncrementalMarking] Start (%s): (size/limit/slack) v8: %zuMB / %zuMB "
160         "/ %zuMB global: %zuMB / %zuMB / %zuMB\n",
161         Heap::GarbageCollectionReasonToString(gc_reason),
162         old_generation_size_mb, old_generation_limit_mb,
163         old_generation_size_mb > old_generation_limit_mb
164             ? 0
165             : old_generation_limit_mb - old_generation_size_mb,
166         global_size_mb, global_limit_mb,
167         global_size_mb > global_limit_mb ? 0
168                                          : global_limit_mb - global_size_mb);
169   }
170   DCHECK(FLAG_incremental_marking);
171   DCHECK(state_ == STOPPED);
172   DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
173   DCHECK(!heap_->isolate()->serializer_enabled());
174 
175   Counters* counters = heap_->isolate()->counters();
176 
177   counters->incremental_marking_reason()->AddSample(
178       static_cast<int>(gc_reason));
179   HistogramTimerScope incremental_marking_scope(
180       counters->gc_incremental_marking_start());
181   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
182   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
183   heap_->tracer()->NotifyIncrementalMarkingStart();
184 
185   start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
186   time_to_force_completion_ = 0.0;
187   initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
188   old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
189   bytes_marked_ = 0;
190   scheduled_bytes_to_mark_ = 0;
191   schedule_update_time_ms_ = start_time_ms_;
192   bytes_marked_concurrently_ = 0;
193   was_activated_ = true;
194 
195   {
196     TRACE_GC(heap()->tracer(),
197              GCTracer::Scope::MC_INCREMENTAL_SWEEP_ARRAY_BUFFERS);
198     heap_->array_buffer_sweeper()->EnsureFinished();
199   }
200 
201   if (!collector_->sweeping_in_progress()) {
202     StartMarking();
203   } else {
204     if (FLAG_trace_incremental_marking) {
205       heap()->isolate()->PrintWithTimestamp(
206           "[IncrementalMarking] Start sweeping.\n");
207     }
208     SetState(SWEEPING);
209   }
210 
211   heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
212                                            &new_generation_observer_);
213   incremental_marking_job()->Start(heap_);
214 }
215 
216 
217 void IncrementalMarking::StartMarking() {
218   if (heap_->isolate()->serializer_enabled()) {
219     // Black allocation currently starts when we start incremental marking,
220     // but we cannot enable black allocation while deserializing. Hence, we
221     // have to delay the start of incremental marking in that case.
222     if (FLAG_trace_incremental_marking) {
223       heap()->isolate()->PrintWithTimestamp(
224           "[IncrementalMarking] Start delayed - serializer\n");
225     }
226     return;
227   }
228   if (FLAG_trace_incremental_marking) {
229     heap()->isolate()->PrintWithTimestamp(
230         "[IncrementalMarking] Start marking\n");
231   }
232 
233   heap_->InvokeIncrementalMarkingPrologueCallbacks();
234 
235   is_compacting_ = !FLAG_never_compact && collector_->StartCompaction();
236   collector_->StartMarking();
237 
238   SetState(MARKING);
239 
240   MarkingBarrier::ActivateAll(heap(), is_compacting_);
241 
242   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
243 
244   StartBlackAllocation();
245 
246   MarkRoots();
247 
248   if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
249     heap_->concurrent_marking()->ScheduleJob();
250   }
251 
252   // Ready to start incremental marking.
253   if (FLAG_trace_incremental_marking) {
254     heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
255   }
256 
257   {
258     // TracePrologue may call back into V8 in corner cases, requiring that
259     // marking (including write barriers) is fully set up.
260     TRACE_GC(heap()->tracer(),
261              GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
262     heap_->local_embedder_heap_tracer()->TracePrologue(
263         heap_->flags_for_embedder_tracer());
264   }
265 
266   heap_->InvokeIncrementalMarkingEpilogueCallbacks();
267 }
268 
269 void IncrementalMarking::StartBlackAllocation() {
270   DCHECK(!black_allocation_);
271   DCHECK(IsMarking());
272   black_allocation_ = true;
273   heap()->old_space()->MarkLinearAllocationAreaBlack();
274   heap()->map_space()->MarkLinearAllocationAreaBlack();
275   heap()->code_space()->MarkLinearAllocationAreaBlack();
276   if (FLAG_local_heaps) {
277     heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
278       local_heap->MarkLinearAllocationAreaBlack();
279     });
280   }
281   if (FLAG_trace_incremental_marking) {
282     heap()->isolate()->PrintWithTimestamp(
283         "[IncrementalMarking] Black allocation started\n");
284   }
285 }
286 
287 void IncrementalMarking::PauseBlackAllocation() {
288   DCHECK(IsMarking());
289   heap()->old_space()->UnmarkLinearAllocationArea();
290   heap()->map_space()->UnmarkLinearAllocationArea();
291   heap()->code_space()->UnmarkLinearAllocationArea();
292   if (FLAG_local_heaps) {
293     heap()->safepoint()->IterateLocalHeaps([](LocalHeap* local_heap) {
294       local_heap->UnmarkLinearAllocationArea();
295     });
296   }
297   if (FLAG_trace_incremental_marking) {
298     heap()->isolate()->PrintWithTimestamp(
299         "[IncrementalMarking] Black allocation paused\n");
300   }
301   black_allocation_ = false;
302 }
303 
304 void IncrementalMarking::FinishBlackAllocation() {
305   if (black_allocation_) {
306     black_allocation_ = false;
307     if (FLAG_trace_incremental_marking) {
308       heap()->isolate()->PrintWithTimestamp(
309           "[IncrementalMarking] Black allocation finished\n");
310     }
311   }
312 }
313 
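// When black allocation is active, make sure the object just allocated at
// |allocated| is black: large objects are marked directly, other objects get
// a black area created on their page.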
314 void IncrementalMarking::EnsureBlackAllocated(Address allocated, size_t size) {
315   if (black_allocation() && allocated != kNullAddress) {
316     HeapObject object = HeapObject::FromAddress(allocated);
317     if (marking_state()->IsWhite(object) && !Heap::InYoungGeneration(object)) {
318       if (heap_->IsLargeObject(object)) {
319         marking_state()->WhiteToBlack(object);
320       } else {
321         Page::FromAddress(allocated)->CreateBlackArea(allocated,
322                                                       allocated + size);
323       }
324     }
325   }
326 }
327 
328 void IncrementalMarking::MarkRoots() {
329   DCHECK(!finalize_marking_completed_);
330   DCHECK(IsMarking());
331 
332   IncrementalMarkingRootMarkingVisitor visitor(this);
333   heap_->IterateRoots(
334       &visitor, base::EnumSet<SkipRoot>{SkipRoot::kStack, SkipRoot::kWeak});
335 }
336 
337 bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
338   if (age == 0) {
339     // The map has aged. Do not retain this map.
340     return false;
341   }
342   Object constructor = map.GetConstructor();
343   if (!constructor.IsHeapObject() ||
344       marking_state()->IsWhite(HeapObject::cast(constructor))) {
345     // The constructor is dead, no new objects with this map can
346     // be created. Do not retain this map.
347     return false;
348   }
349   return true;
350 }
351 
352 
353 void IncrementalMarking::RetainMaps() {
354   // Do not retain dead maps if the flag disables it, or if there is
355   // - memory pressure (reduce_memory_footprint_),
356   // - a GC requested by tests or dev-tools (abort_incremental_marking_).
357   bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
358                                    FLAG_retain_maps_for_n_gc == 0;
359   std::vector<WeakArrayList> retained_maps_list = heap()->FindAllRetainedMaps();
360 
361   for (WeakArrayList retained_maps : retained_maps_list) {
362     int length = retained_maps.length();
363 
364     for (int i = 0; i < length; i += 2) {
365       MaybeObject value = retained_maps.Get(i);
366       HeapObject map_heap_object;
367       if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
368         continue;
369       }
370       int age = retained_maps.Get(i + 1).ToSmi().value();
371       int new_age;
372       Map map = Map::cast(map_heap_object);
373       if (!map_retaining_is_disabled && marking_state()->IsWhite(map)) {
374         if (ShouldRetainMap(map, age)) {
375           WhiteToGreyAndPush(map);
376         }
377         Object prototype = map.prototype();
378         if (age > 0 && prototype.IsHeapObject() &&
379             marking_state()->IsWhite(HeapObject::cast(prototype))) {
380           // The prototype is not marked, age the map.
381           new_age = age - 1;
382         } else {
383           // The prototype and the constructor are marked; this map keeps only
384           // the transition tree alive, not JSObjects. Do not age the map.
385           new_age = age;
386         }
387       } else {
388         new_age = FLAG_retain_maps_for_n_gc;
389       }
390       // Update the age if it changed.
391       if (new_age != age) {
392         retained_maps.Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
393       }
394     }
395   }
396 }
397 
398 void IncrementalMarking::FinalizeIncrementally() {
399   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
400   DCHECK(!finalize_marking_completed_);
401   DCHECK(IsMarking());
402 
403   double start = heap_->MonotonicallyIncreasingTimeInMs();
404 
405   // After finishing incremental marking, we try to discover all unmarked
406   // objects to reduce the marking load in the final pause.
407   // 1) We scan and mark the roots again to find all changes to the root set.
408   // 2) Age and retain maps embedded in optimized code.
409   MarkRoots();
410 
411   // Map retaining is needed for performance, not correctness,
412   // so we can do it only once at the beginning of the finalization.
413   RetainMaps();
414 
415   MarkingBarrier::PublishAll(heap());
416 
417   finalize_marking_completed_ = true;
418 
419   if (FLAG_trace_incremental_marking) {
420     double end = heap_->MonotonicallyIncreasingTimeInMs();
421     double delta = end - start;
422     heap()->isolate()->PrintWithTimestamp(
423         "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
424   }
425 }
426 
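// After a scavenge, worklist entries may point into from-space: dead entries
// are dropped and surviving ones are rewritten to their forwarding addresses.
// Entries on swept-to-iterate new-space pages are kept only if the minor
// marker saw them as live.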
427 void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
428   if (!IsMarking()) return;
429 
430   Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
431 
432 #ifdef ENABLE_MINOR_MC
433   MinorMarkCompactCollector::MarkingState* minor_marking_state =
434       heap()->minor_mark_compact_collector()->marking_state();
435 #endif  // ENABLE_MINOR_MC
436 
437   collector_->local_marking_worklists()->Publish();
438   MarkingBarrier::PublishAll(heap());
439   collector_->marking_worklists()->Update(
440       [
441 #ifdef DEBUG
442           // |this| is referred to inside the DCHECKs below.
443           this,
444 #endif
445 #ifdef ENABLE_MINOR_MC
446           minor_marking_state,
447 #endif
448           filler_map](HeapObject obj, HeapObject* out) -> bool {
449         DCHECK(obj.IsHeapObject());
450         // Only pointers to from space have to be updated.
451         if (Heap::InFromPage(obj)) {
452           MapWord map_word = obj.map_word();
453           if (!map_word.IsForwardingAddress()) {
454             // There may be objects on the marking deque that do not exist
455             // anymore, e.g. left trimmed objects or objects from the root set
456             // (frames). If these objects are dead at scavenging time, their
457             // marking deque entries will not point to forwarding addresses.
458             // Hence, we can discard them.
459             return false;
460           }
461           HeapObject dest = map_word.ToForwardingAddress();
462           DCHECK_IMPLIES(marking_state()->IsWhite(obj),
463                          obj.IsFreeSpaceOrFiller());
464           *out = dest;
465           return true;
466         } else if (Heap::InToPage(obj)) {
467           // The object may be on a large page or on a page that was moved in
468           // new space.
469           DCHECK(Heap::IsLargeObject(obj) ||
470                  Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE));
471 #ifdef ENABLE_MINOR_MC
472           if (minor_marking_state->IsWhite(obj)) {
473             return false;
474           }
475 #endif  // ENABLE_MINOR_MC
476           // Either a large object or an object marked by the minor
477           // mark-compactor.
478           *out = obj;
479           return true;
480         } else {
481           // The object may be on a page that was moved from new to old space.
482           // Only applicable during minor MC garbage collections.
483           if (Page::FromHeapObject(obj)->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
484 #ifdef ENABLE_MINOR_MC
485             if (minor_marking_state->IsWhite(obj)) {
486               return false;
487             }
488 #endif  // ENABLE_MINOR_MC
489             *out = obj;
490             return true;
491           }
492           DCHECK_IMPLIES(marking_state()->IsWhite(obj),
493                          obj.IsFreeSpaceOrFiller());
494           // Skip one-word filler objects that appear on the
495           // stack when we perform an in-place array shift.
496           if (obj.map() != filler_map) {
497             *out = obj;
498             return true;
499           }
500           return false;
501         }
502       });
503 
504   weak_objects_->UpdateAfterScavenge();
505 }
506 
507 void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
508     size_t dead_bytes_in_new_space) {
509   if (!IsMarking()) return;
510   bytes_marked_ -= Min(bytes_marked_, dead_bytes_in_new_space);
511 }
512 
513 void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject obj) {
514   if (IsMarking() && marking_state()->IsBlack(obj)) {
515     collector_->RevisitObject(obj);
516   }
517 }
518 
519 StepResult IncrementalMarking::EmbedderStep(double expected_duration_ms,
520                                             double* duration_ms) {
521   if (!ShouldDoEmbedderStep()) {
522     *duration_ms = 0.0;
523     return StepResult::kNoImmediateWork;
524   }
525 
526   constexpr size_t kObjectsToProcessBeforeDeadlineCheck = 500;
527 
528   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
529   LocalEmbedderHeapTracer* local_tracer = heap_->local_embedder_heap_tracer();
530   const double start = heap_->MonotonicallyIncreasingTimeInMs();
531   const double deadline = start + expected_duration_ms;
532   bool empty_worklist;
533   {
534     LocalEmbedderHeapTracer::ProcessingScope scope(local_tracer);
535     HeapObject object;
536     size_t cnt = 0;
537     empty_worklist = true;
538     while (local_marking_worklists()->PopEmbedder(&object)) {
539       scope.TracePossibleWrapper(JSObject::cast(object));
540       if (++cnt == kObjectsToProcessBeforeDeadlineCheck) {
541         if (deadline <= heap_->MonotonicallyIncreasingTimeInMs()) {
542           empty_worklist = false;
543           break;
544         }
545         cnt = 0;
546       }
547     }
548   }
549   // |deadline - heap_->MonotonicallyIncreasingTimeInMs()| could be negative,
550   // which means |local_tracer| won't do any actual tracing, so there is no
551   // need to check for |deadline <= heap_->MonotonicallyIncreasingTimeInMs()|.
552   bool remote_tracing_done =
553       local_tracer->Trace(deadline - heap_->MonotonicallyIncreasingTimeInMs());
554   double current = heap_->MonotonicallyIncreasingTimeInMs();
555   local_tracer->SetEmbedderWorklistEmpty(empty_worklist);
556   *duration_ms = current - start;
557   return (empty_worklist && remote_tracing_done)
558              ? StepResult::kNoImmediateWork
559              : StepResult::kMoreWorkRemaining;
560 }
561 
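// Hurry() drains the remaining marking worklist on the main thread in one go
// and moves the marker to the COMPLETE state.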
562 void IncrementalMarking::Hurry() {
563   if (!local_marking_worklists()->IsEmpty()) {
564     double start = 0.0;
565     if (FLAG_trace_incremental_marking) {
566       start = heap_->MonotonicallyIncreasingTimeInMs();
567       if (FLAG_trace_incremental_marking) {
568         heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
569       }
570     }
571     collector_->ProcessMarkingWorklist(0);
572     SetState(COMPLETE);
573     if (FLAG_trace_incremental_marking) {
574       double end = heap_->MonotonicallyIncreasingTimeInMs();
575       double delta = end - start;
576       if (FLAG_trace_incremental_marking) {
577         heap()->isolate()->PrintWithTimestamp(
578             "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
579             static_cast<int>(delta));
580       }
581     }
582   }
583 }
584 
585 
586 void IncrementalMarking::Stop() {
587   if (IsStopped()) return;
588   if (FLAG_trace_incremental_marking) {
589     int old_generation_size_mb =
590         static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
591     int old_generation_limit_mb =
592         static_cast<int>(heap()->old_generation_allocation_limit() / MB);
593     heap()->isolate()->PrintWithTimestamp(
594         "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
595         "overshoot %dMB\n",
596         old_generation_size_mb, old_generation_limit_mb,
597         Max(0, old_generation_size_mb - old_generation_limit_mb));
598   }
599 
600   SpaceIterator it(heap_);
601   while (it.HasNext()) {
602     Space* space = it.Next();
603     if (space == heap_->new_space()) {
604       space->RemoveAllocationObserver(&new_generation_observer_);
605     } else {
606       space->RemoveAllocationObserver(&old_generation_observer_);
607     }
608   }
609 
610   heap_->isolate()->stack_guard()->ClearGC();
611   SetState(STOPPED);
612   is_compacting_ = false;
613   FinishBlackAllocation();
614 
615   if (FLAG_local_heaps) {
616     // Merge live bytes counters of background threads
617     for (auto pair : background_live_bytes_) {
618       MemoryChunk* memory_chunk = pair.first;
619       intptr_t live_bytes = pair.second;
620 
621       if (live_bytes) {
622         marking_state()->IncrementLiveBytes(memory_chunk, live_bytes);
623       }
624     }
625 
626     background_live_bytes_.clear();
627   }
628 }
629 
630 
631 void IncrementalMarking::Finalize() {
632   Hurry();
633   Stop();
634 }
635 
636 
637 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
638   DCHECK(!finalize_marking_completed_);
639   if (FLAG_trace_incremental_marking) {
640     heap()->isolate()->PrintWithTimestamp(
641         "[IncrementalMarking] requesting finalization of incremental "
642         "marking.\n");
643   }
644   request_type_ = FINALIZATION;
645   if (action == GC_VIA_STACK_GUARD) {
646     heap_->isolate()->stack_guard()->RequestGC();
647   }
648 }
649 
650 double IncrementalMarking::CurrentTimeToMarkingTask() const {
651   const double recorded_time_to_marking_task =
652       heap_->tracer()->AverageTimeToIncrementalMarkingTask();
653   const double current_time_to_marking_task =
654       incremental_marking_job_.CurrentTimeToTask(heap_);
655   if (recorded_time_to_marking_task == 0.0) return 0.0;
656   return Max(recorded_time_to_marking_task, current_time_to_marking_task);
657 }
658 
659 void IncrementalMarking::MarkingComplete(CompletionAction action) {
660   // Allowed overshoot percentage of incremental marking walltime.
661   constexpr double kAllowedOvershoot = 0.1;
662   // Minimum overshoot in ms. This is used to allow moving away from the stack
663   // when marking was fast.
664   constexpr double kMinOvershootMs = 50;
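  // Example: after 2000ms of marking, the allowed overshoot is
  // max(50ms, 2000ms * 0.1) = 200ms before completion is forced.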
665 
666   if (action == GC_VIA_STACK_GUARD) {
667     if (time_to_force_completion_ == 0.0) {
668       const double now = heap_->MonotonicallyIncreasingTimeInMs();
669       const double overshoot_ms =
670           Max(kMinOvershootMs, (now - start_time_ms_) * kAllowedOvershoot);
671       const double time_to_marking_task = CurrentTimeToMarkingTask();
672       if (time_to_marking_task == 0.0 || time_to_marking_task > overshoot_ms) {
673         if (FLAG_trace_incremental_marking) {
674           heap()->isolate()->PrintWithTimestamp(
675               "[IncrementalMarking] Not delaying marking completion. time to "
676               "task: %fms allowed overshoot: %fms\n",
677               time_to_marking_task, overshoot_ms);
678         }
679       } else {
680         time_to_force_completion_ = now + overshoot_ms;
681         if (FLAG_trace_incremental_marking) {
682           heap()->isolate()->PrintWithTimestamp(
683               "[IncrementalMarking] Delaying GC via stack guard. time to task: "
684               "%fms "
685               "allowed overshoot: %fms\n",
686               time_to_marking_task, overshoot_ms);
687         }
688         incremental_marking_job_.ScheduleTask(
689             heap(), IncrementalMarkingJob::TaskType::kNormal);
690         return;
691       }
692     }
693     if (heap()->MonotonicallyIncreasingTimeInMs() < time_to_force_completion_) {
694       if (FLAG_trace_incremental_marking) {
695         heap()->isolate()->PrintWithTimestamp(
696             "[IncrementalMarking] Delaying GC via stack guard. time left: "
697             "%fms\n",
698             time_to_force_completion_ -
699                 heap_->MonotonicallyIncreasingTimeInMs());
700       }
701       return;
702     }
703   }
704 
705   SetState(COMPLETE);
706   // We will set the stack guard to request a GC now.  This will mean the rest
707   // of the GC gets performed as soon as possible (we can't do a GC here in a
708   // record-write context).  If a few things get allocated between now and then,
709   // that shouldn't make us do a scavenge; we should keep marking incrementally.
710   if (FLAG_trace_incremental_marking) {
711     heap()->isolate()->PrintWithTimestamp(
712         "[IncrementalMarking] Complete (normal).\n");
713   }
714   request_type_ = COMPLETE_MARKING;
715   if (action == GC_VIA_STACK_GUARD) {
716     heap_->isolate()->stack_guard()->RequestGC();
717   }
718 }
719 
720 void IncrementalMarking::Epilogue() {
721   was_activated_ = false;
722   finalize_marking_completed_ = false;
723 }
724 
725 bool IncrementalMarking::ShouldDoEmbedderStep() {
726   return state_ == MARKING && FLAG_incremental_marking_wrappers &&
727          heap_->local_embedder_heap_tracer()->InUse();
728 }
729 
730 void IncrementalMarking::FastForwardSchedule() {
731   if (scheduled_bytes_to_mark_ < bytes_marked_) {
732     scheduled_bytes_to_mark_ = bytes_marked_;
733     if (FLAG_trace_incremental_marking) {
734       heap_->isolate()->PrintWithTimestamp(
735           "[IncrementalMarking] Fast-forwarded schedule\n");
736     }
737   }
738 }
739 
740 void IncrementalMarking::FastForwardScheduleIfCloseToFinalization() {
741   // Consider marking close to finalization if 75% of the initial old
742   // generation was marked.
743   if (bytes_marked_ > 3 * (initial_old_generation_size_ / 4)) {
744     FastForwardSchedule();
745   }
746 }
747 
748 void IncrementalMarking::ScheduleBytesToMarkBasedOnTime(double time_ms) {
749   // Time interval that should be sufficient to complete incremental marking.
750   constexpr double kTargetMarkingWallTimeInMs = 500;
751   constexpr double kMinTimeBetweenScheduleInMs = 10;
752   if (schedule_update_time_ms_ + kMinTimeBetweenScheduleInMs > time_ms) return;
753   double delta_ms =
754       Min(time_ms - schedule_update_time_ms_, kTargetMarkingWallTimeInMs);
755   schedule_update_time_ms_ = time_ms;
756 
757   size_t bytes_to_mark =
758       (delta_ms / kTargetMarkingWallTimeInMs) * initial_old_generation_size_;
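  // Example: with the 500ms target and a 100MB initial old generation, a
  // 50ms delta schedules (50 / 500) * 100MB = 10MB of marking work.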
759   AddScheduledBytesToMark(bytes_to_mark);
760 
761   if (FLAG_trace_incremental_marking) {
762     heap_->isolate()->PrintWithTimestamp(
763         "[IncrementalMarking] Scheduled %zuKB to mark based on time delta "
764         "%.1fms\n",
765         bytes_to_mark / KB, delta_ms);
766   }
767 }
768 
769 namespace {
770 StepResult CombineStepResults(StepResult a, StepResult b) {
771   DCHECK_NE(StepResult::kWaitingForFinalization, a);
772   DCHECK_NE(StepResult::kWaitingForFinalization, b);
773   if (a == StepResult::kMoreWorkRemaining ||
774       b == StepResult::kMoreWorkRemaining)
775     return StepResult::kMoreWorkRemaining;
776   return StepResult::kNoImmediateWork;
777 }
778 }  // anonymous namespace
779 
780 StepResult IncrementalMarking::AdvanceWithDeadline(
781     double deadline_in_ms, CompletionAction completion_action,
782     StepOrigin step_origin) {
783   HistogramTimerScope incremental_marking_scope(
784       heap_->isolate()->counters()->gc_incremental_marking());
785   TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
786   TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
787   DCHECK(!IsStopped());
788 
789   ScheduleBytesToMarkBasedOnTime(heap()->MonotonicallyIncreasingTimeInMs());
790   FastForwardScheduleIfCloseToFinalization();
791   return Step(kStepSizeInMs, completion_action, step_origin);
792 }
793 
794 void IncrementalMarking::FinalizeSweeping() {
795   DCHECK(state_ == SWEEPING);
796   if (ContinueConcurrentSweeping()) {
797     if (FLAG_stress_incremental_marking) {
798       // To start concurrent marking a bit earlier, support the concurrent
799       // sweepers from the main thread by sweeping some pages.
800       SupportConcurrentSweeping();
801     }
802     return;
803   }
804 
805   SafepointScope scope(heap());
806   collector_->EnsureSweepingCompleted();
807   DCHECK(!collector_->sweeping_in_progress());
808 #ifdef DEBUG
809   heap_->VerifyCountersAfterSweeping();
810 #endif
811   StartMarking();
812 }
813 
814 bool IncrementalMarking::ContinueConcurrentSweeping() {
815   if (!collector_->sweeping_in_progress()) return false;
816   return FLAG_concurrent_sweeping &&
817          collector_->sweeper()->AreSweeperTasksRunning();
818 }
819 
820 void IncrementalMarking::SupportConcurrentSweeping() {
821   collector_->sweeper()->SupportConcurrentSweeping();
822 }
823 
824 size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
825   // Return the bytes allocated since the last step based on the allocation counter.
826   size_t current_counter = heap_->OldGenerationAllocationCounter();
827   size_t result = current_counter - old_generation_allocation_counter_;
828   old_generation_allocation_counter_ = current_counter;
829   return result;
830 }
831 
832 size_t IncrementalMarking::StepSizeToMakeProgress() {
833   const size_t kTargetStepCount = 256;
834   const size_t kTargetStepCountAtOOM = 32;
835   const size_t kMaxStepSizeInByte = 256 * KB;
836   size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
837 
838   if (!heap()->CanExpandOldGeneration(oom_slack)) {
839     return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
840   }
841 
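  // Otherwise aim for roughly kTargetStepCount steps over the initial old
  // generation size, clamped to [kMinStepSizeInBytes, kMaxStepSizeInByte],
  // e.g. a 64MB old generation yields 64MB / 256 = 256KB per step.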
842   return Min(Max(initial_old_generation_size_ / kTargetStepCount,
843                  IncrementalMarking::kMinStepSizeInBytes),
844              kMaxStepSizeInByte);
845 }
846 
847 void IncrementalMarking::AddScheduledBytesToMark(size_t bytes_to_mark) {
848   if (scheduled_bytes_to_mark_ + bytes_to_mark < scheduled_bytes_to_mark_) {
849     // The overflow case.
850     scheduled_bytes_to_mark_ = std::numeric_limits<std::size_t>::max();
851   } else {
852     scheduled_bytes_to_mark_ += bytes_to_mark;
853   }
854 }
855 
856 void IncrementalMarking::ScheduleBytesToMarkBasedOnAllocation() {
857   size_t progress_bytes = StepSizeToMakeProgress();
858   size_t allocation_bytes = StepSizeToKeepUpWithAllocations();
859   size_t bytes_to_mark = progress_bytes + allocation_bytes;
860   AddScheduledBytesToMark(bytes_to_mark);
861 
862   if (FLAG_trace_incremental_marking) {
863     heap_->isolate()->PrintWithTimestamp(
864         "[IncrementalMarking] Scheduled %zuKB to mark based on allocation "
865         "(progress=%zuKB, allocation=%zuKB)\n",
866         bytes_to_mark / KB, progress_bytes / KB, allocation_bytes / KB);
867   }
868 }
869 
870 void IncrementalMarking::FetchBytesMarkedConcurrently() {
871   if (FLAG_concurrent_marking) {
872     size_t current_bytes_marked_concurrently =
873         heap()->concurrent_marking()->TotalMarkedBytes();
874     // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
875     // short period of time when a concurrent marking task is finishing.
876     if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
877       bytes_marked_ +=
878           current_bytes_marked_concurrently - bytes_marked_concurrently_;
879       bytes_marked_concurrently_ = current_bytes_marked_concurrently;
880     }
881     if (FLAG_trace_incremental_marking) {
882       heap_->isolate()->PrintWithTimestamp(
883           "[IncrementalMarking] Marked %zuKB on background threads\n",
884           heap_->concurrent_marking()->TotalMarkedBytes() / KB);
885     }
886   }
887 }
888 
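// Returns how far the marker is behind the schedule after folding in the
// bytes marked concurrently. Steps triggered by allocation (StepOrigin::kV8)
// are allowed to lag by 1MB so that marking tasks do the bulk of the work.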
889 size_t IncrementalMarking::ComputeStepSizeInBytes(StepOrigin step_origin) {
890   FetchBytesMarkedConcurrently();
891   if (FLAG_trace_incremental_marking) {
892     if (scheduled_bytes_to_mark_ > bytes_marked_) {
893       heap_->isolate()->PrintWithTimestamp(
894           "[IncrementalMarking] Marker is %zuKB behind schedule\n",
895           (scheduled_bytes_to_mark_ - bytes_marked_) / KB);
896     } else {
897       heap_->isolate()->PrintWithTimestamp(
898           "[IncrementalMarking] Marker is %zuKB ahead of schedule\n",
899           (bytes_marked_ - scheduled_bytes_to_mark_) / KB);
900     }
901   }
902   // Allow steps on allocation to get behind the schedule by a small amount.
903   // This gives higher priority to steps in tasks.
904   size_t kScheduleMarginInBytes = step_origin == StepOrigin::kV8 ? 1 * MB : 0;
905   if (bytes_marked_ + kScheduleMarginInBytes > scheduled_bytes_to_mark_)
906     return 0;
907   return scheduled_bytes_to_mark_ - bytes_marked_ - kScheduleMarginInBytes;
908 }
909 
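// Called from the allocation observers: schedules marking work proportional
// to the bytes allocated since the last step and performs a bounded step.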
910 void IncrementalMarking::AdvanceOnAllocation() {
911   // Code using an AlwaysAllocateScope assumes that the GC state does not
912   // change; that implies that no marking steps must be performed.
913   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
914       (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
915     return;
916   }
917   HistogramTimerScope incremental_marking_scope(
918       heap_->isolate()->counters()->gc_incremental_marking());
919   TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
920   TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
921   ScheduleBytesToMarkBasedOnAllocation();
922   Step(kMaxStepSizeInMs, GC_VIA_STACK_GUARD, StepOrigin::kV8);
923 }
924 
925 StepResult IncrementalMarking::Step(double max_step_size_in_ms,
926                                     CompletionAction action,
927                                     StepOrigin step_origin) {
928   double start = heap_->MonotonicallyIncreasingTimeInMs();
929 
930   if (state_ == SWEEPING) {
931     TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
932     FinalizeSweeping();
933   }
934 
935   StepResult combined_result = StepResult::kMoreWorkRemaining;
936   size_t bytes_to_process = 0;
937   size_t v8_bytes_processed = 0;
938   double embedder_duration = 0.0;
939   double embedder_deadline = 0.0;
940   if (state_ == MARKING) {
941     if (FLAG_concurrent_marking) {
942       // It is safe to merge back all objects that were on hold into the shared
943       // work list at Step because we are at a safepoint where all objects
944       // are properly initialized.
945       local_marking_worklists()->MergeOnHold();
946     }
947 
948 // Only print marking worklist in debug mode to save ~40KB of code size.
949 #ifdef DEBUG
950     if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
951         FLAG_trace_gc_verbose) {
952       collector_->marking_worklists()->Print();
953     }
954 #endif
955     if (FLAG_trace_incremental_marking) {
956       heap_->isolate()->PrintWithTimestamp(
957           "[IncrementalMarking] Marking speed %.fKB/ms\n",
958           heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
959     }
960     // The first step after Scavenge will see many allocated bytes.
961     // Cap the step size to distribute the marking work more uniformly.
962     const double marking_speed =
963         heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond();
964     size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
965         max_step_size_in_ms, marking_speed);
966     bytes_to_process = Min(ComputeStepSizeInBytes(step_origin), max_step_size);
967     bytes_to_process = Max(bytes_to_process, kMinStepSizeInBytes);
968 
969     // Perform a single V8 and a single embedder step. In case both have been
970     // observed as empty back to back, we can finalize.
971     //
972     // This ignores the case where the embedder finds new V8-side objects. The
973     // assumption is that large graphs are well connected and can mostly be
974     // processed on their own. For small graphs, helping is not necessary.
975     v8_bytes_processed = collector_->ProcessMarkingWorklist(bytes_to_process);
976     StepResult v8_result = local_marking_worklists()->IsEmpty()
977                                ? StepResult::kNoImmediateWork
978                                : StepResult::kMoreWorkRemaining;
979     StepResult embedder_result = StepResult::kNoImmediateWork;
980     if (heap_->local_embedder_heap_tracer()->InUse()) {
981       embedder_deadline =
982           Min(max_step_size_in_ms,
983               static_cast<double>(bytes_to_process) / marking_speed);
984       // TODO(chromium:1056170): Replace embedder_deadline with bytes_to_process
985       // after migrating blink to the cppgc library and after v8 can directly
986       // push objects to Oilpan.
987       embedder_result = EmbedderStep(embedder_deadline, &embedder_duration);
988     }
989     bytes_marked_ += v8_bytes_processed;
990     combined_result = CombineStepResults(v8_result, embedder_result);
991 
992     if (combined_result == StepResult::kNoImmediateWork) {
993       if (!finalize_marking_completed_) {
994         FinalizeMarking(action);
995         FastForwardSchedule();
996         combined_result = StepResult::kWaitingForFinalization;
997         incremental_marking_job()->Start(heap_);
998       } else {
999         MarkingComplete(action);
1000         combined_result = StepResult::kWaitingForFinalization;
1001       }
1002     }
1003     if (FLAG_concurrent_marking) {
1004       local_marking_worklists()->ShareWork();
1005       heap_->concurrent_marking()->RescheduleJobIfNeeded();
1006     }
1007   }
1008   if (state_ == MARKING) {
1009     // Note that we do not report any marked bytes in case of finishing sweeping,
1010     // as we did not process the marking worklist.
1011     const double v8_duration =
1012         heap_->MonotonicallyIncreasingTimeInMs() - start - embedder_duration;
1013     heap_->tracer()->AddIncrementalMarkingStep(v8_duration, v8_bytes_processed);
1014   }
1015   if (FLAG_trace_incremental_marking) {
1016     heap_->isolate()->PrintWithTimestamp(
1017         "[IncrementalMarking] Step %s V8: %zuKB (%zuKB), embedder: %fms (%fms) "
1018         "in %.1f\n",
1019         step_origin == StepOrigin::kV8 ? "in v8" : "in task",
1020         v8_bytes_processed / KB, bytes_to_process / KB, embedder_duration,
1021         embedder_deadline, heap_->MonotonicallyIncreasingTimeInMs() - start);
1022   }
1023   return combined_result;
1024 }
1025 
1026 }  // namespace internal
1027 }  // namespace v8
1028