1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/incremental-marking.h"
6 
7 #include "src/code-stubs.h"
8 #include "src/compilation-cache.h"
9 #include "src/conversions.h"
10 #include "src/heap/gc-idle-time-handler.h"
11 #include "src/heap/gc-tracer.h"
12 #include "src/heap/mark-compact-inl.h"
13 #include "src/heap/object-stats.h"
14 #include "src/heap/objects-visiting-inl.h"
15 #include "src/heap/objects-visiting.h"
16 #include "src/tracing/trace-event.h"
17 #include "src/v8.h"
18 
19 namespace v8 {
20 namespace internal {
21 
22 IncrementalMarking::StepActions IncrementalMarking::IdleStepActions() {
23   return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
24                      IncrementalMarking::FORCE_MARKING,
25                      IncrementalMarking::DO_NOT_FORCE_COMPLETION);
26 }
27 
28 IncrementalMarking::IncrementalMarking(Heap* heap)
29     : heap_(heap),
30       observer_(*this, kAllocatedThreshold),
31       state_(STOPPED),
32       is_compacting_(false),
33       steps_count_(0),
34       old_generation_space_available_at_start_of_incremental_(0),
35       old_generation_space_used_at_start_of_incremental_(0),
36       bytes_rescanned_(0),
37       should_hurry_(false),
38       marking_speed_(0),
39       bytes_scanned_(0),
40       allocated_(0),
41       write_barriers_invoked_since_last_step_(0),
42       idle_marking_delay_counter_(0),
43       unscanned_bytes_of_large_object_(0),
44       was_activated_(false),
45       black_allocation_(false),
46       finalize_marking_completed_(false),
47       incremental_marking_finalization_rounds_(0),
48       request_type_(NONE) {}
49 
50 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
51   HeapObject* value_heap_obj = HeapObject::cast(value);
52   MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
53   DCHECK(!Marking::IsImpossible(value_bit));
54 
55   MarkBit obj_bit = Marking::MarkBitFrom(obj);
56   DCHECK(!Marking::IsImpossible(obj_bit));
57   bool is_black = Marking::IsBlack(obj_bit);
58 
59   if (is_black && Marking::IsWhite(value_bit)) {
60     WhiteToGreyAndPush(value_heap_obj, value_bit);
61     RestartIfNotMarking();
62   }
63   return is_compacting_ && is_black;
64 }
65 
66 
67 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
68                                          Object* value) {
69   if (BaseRecordWrite(obj, value) && slot != NULL) {
70     // Object is not going to be rescanned. We need to record the slot.
71     heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
72   }
73 }
74 
75 
76 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
77                                              Isolate* isolate) {
78   DCHECK(obj->IsHeapObject());
79   IncrementalMarking* marking = isolate->heap()->incremental_marking();
80 
81   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
82   int counter = chunk->write_barrier_counter();
83   if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
84     marking->write_barriers_invoked_since_last_step_ +=
85         MemoryChunk::kWriteBarrierCounterGranularity -
86         chunk->write_barrier_counter();
87     chunk->set_write_barrier_counter(
88         MemoryChunk::kWriteBarrierCounterGranularity);
89   }
90 
91   marking->RecordWrite(obj, slot, *slot);
92 }
93 
94 // static
95 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
96                                                         Object** slot,
97                                                         Isolate* isolate) {
98   DCHECK(host->IsJSFunction());
99   IncrementalMarking* marking = isolate->heap()->incremental_marking();
100   Code* value = Code::cast(
101       Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
102   marking->RecordWriteOfCodeEntry(host, slot, value);
103 }
104 
105 void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
106                                                HeapObject* value) {
107   if (IsMarking()) {
108     RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
109     RecordWriteIntoCode(host, &rinfo, value);
110   }
111 }
112 
113 
114 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
115   if (IsMarking()) {
116     Code* host = heap_->isolate()
117                      ->inner_pointer_to_code_cache()
118                      ->GcSafeFindCodeForInnerPointer(pc);
119     RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
120     RecordWriteIntoCode(host, &rinfo, value);
121   }
122 }
123 
124 
125 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
126                                                     Object** slot,
127                                                     Code* value) {
128   if (BaseRecordWrite(host, value)) {
129     DCHECK(slot != NULL);
130     heap_->mark_compact_collector()->RecordCodeEntrySlot(
131         host, reinterpret_cast<Address>(slot), value);
132   }
133 }
134 
135 void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
136                                                  Object* value) {
137   if (BaseRecordWrite(host, value)) {
138     // Object is not going to be rescanned.  We need to record the slot.
139     heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
140   }
141 }
142 
143 
144 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
145   Marking::WhiteToGrey(mark_bit);
146   heap_->mark_compact_collector()->marking_deque()->Push(obj);
147 }
148 
149 
150 static void MarkObjectGreyDoNotEnqueue(Object* obj) {
151   if (obj->IsHeapObject()) {
152     HeapObject* heap_obj = HeapObject::cast(obj);
153     MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
154     if (Marking::IsBlack(mark_bit)) {
155       MemoryChunk::IncrementLiveBytesFromGC(heap_obj, -heap_obj->Size());
156     }
157     Marking::AnyToGrey(mark_bit);
158   }
159 }
160 
161 
162 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
163                                         MarkBit mark_bit, int size) {
164   DCHECK(!Marking::IsImpossible(mark_bit));
165   if (Marking::IsBlack(mark_bit)) return;
166   Marking::MarkBlack(mark_bit);
167   MemoryChunk::IncrementLiveBytesFromGC(heap_object, size);
168 }
169 
170 
171 class IncrementalMarkingMarkingVisitor
172     : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
173  public:
174   static void Initialize() {
175     StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
176     table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
177     table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
178     table_.Register(kVisitJSRegExp, &VisitJSRegExp);
179     if (FLAG_track_gc_object_stats) {
180       IncrementalMarkingObjectStatsVisitor::Initialize(&table_);
181     }
182   }
183 
184   static const int kProgressBarScanningChunk = 32 * 1024;
185 
186   static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
187     MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
188     // TODO(mstarzinger): Move setting of the flag to the allocation site of
189     // the array. The visitor should just check the flag.
190     if (FLAG_use_marking_progress_bar &&
191         chunk->owner()->identity() == LO_SPACE) {
192       chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
193     }
194     if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
195       Heap* heap = map->GetHeap();
196       // When using a progress bar for large fixed arrays, scan only a chunk of
197       // the array and try to push it onto the marking deque again until it is
198       // fully scanned. Fall back to scanning it through to the end in case this
199       // fails because of a full deque.
200       int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
201       int start_offset =
202           Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
203       int end_offset =
204           Min(object_size, start_offset + kProgressBarScanningChunk);
205       int already_scanned_offset = start_offset;
206       bool scan_until_end = false;
207       do {
208         VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
209                       HeapObject::RawField(object, end_offset));
210         start_offset = end_offset;
211         end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
212         scan_until_end =
213             heap->mark_compact_collector()->marking_deque()->IsFull();
214       } while (scan_until_end && start_offset < object_size);
215       chunk->set_progress_bar(start_offset);
216       if (start_offset < object_size) {
217         if (Marking::IsGrey(Marking::MarkBitFrom(object))) {
218           heap->mark_compact_collector()->marking_deque()->Unshift(object);
219         } else {
220           DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
221           heap->mark_compact_collector()->UnshiftBlack(object);
222         }
223         heap->incremental_marking()->NotifyIncompleteScanOfObject(
224             object_size - (start_offset - already_scanned_offset));
225       }
226     } else {
227       FixedArrayVisitor::Visit(map, object);
228     }
229   }
230 
231   static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
232     Context* context = Context::cast(object);
233 
234     // We will mark cache black with a separate pass when we finish marking.
235     // Note that GC can happen when the context is not fully initialized,
236     // so the cache can be undefined.
237     Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
238     if (!cache->IsUndefined(map->GetIsolate())) {
239       MarkObjectGreyDoNotEnqueue(cache);
240     }
241     VisitNativeContext(map, context);
242   }
243 
244   INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
245     Object* target = *p;
246     if (target->IsHeapObject()) {
247       heap->mark_compact_collector()->RecordSlot(object, p, target);
248       MarkObject(heap, target);
249     }
250   }
251 
252   INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
253                                    Object** start, Object** end)) {
254     for (Object** p = start; p < end; p++) {
255       Object* target = *p;
256       if (target->IsHeapObject()) {
257         heap->mark_compact_collector()->RecordSlot(object, p, target);
258         MarkObject(heap, target);
259       }
260     }
261   }
262 
263   // Marks the object grey and pushes it on the marking stack.
264   INLINE(static void MarkObject(Heap* heap, Object* obj)) {
265     IncrementalMarking::MarkObject(heap, HeapObject::cast(obj));
266   }
267 
268   // Marks the object black without pushing it on the marking stack.
269   // Returns true if object needed marking and false otherwise.
270   INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
271     HeapObject* heap_object = HeapObject::cast(obj);
272     MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
273     if (Marking::IsWhite(mark_bit)) {
274       Marking::MarkBlack(mark_bit);
275       MemoryChunk::IncrementLiveBytesFromGC(heap_object, heap_object->Size());
276       return true;
277     }
278     return false;
279   }
280 };
281 
282 void IncrementalMarking::IterateBlackObject(HeapObject* object) {
283   if (IsMarking() && Marking::IsBlack(Marking::MarkBitFrom(object))) {
284     Page* page = Page::FromAddress(object->address());
285     if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
286       // IterateBlackObject requires us to visit the whole object.
287       page->ResetProgressBar();
288     }
289     IncrementalMarkingMarkingVisitor::IterateBody(object->map(), object);
290   }
291 }
292 
293 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
294  public:
295   explicit IncrementalMarkingRootMarkingVisitor(
296       IncrementalMarking* incremental_marking)
297       : heap_(incremental_marking->heap()) {}
298 
299   void VisitPointer(Object** p) override { MarkObjectByPointer(p); }
300 
301   void VisitPointers(Object** start, Object** end) override {
302     for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
303   }
304 
305  private:
306   void MarkObjectByPointer(Object** p) {
307     Object* obj = *p;
308     if (!obj->IsHeapObject()) return;
309 
310     IncrementalMarking::MarkObject(heap_, HeapObject::cast(obj));
311   }
312 
313   Heap* heap_;
314 };
315 
316 
317 void IncrementalMarking::Initialize() {
318   IncrementalMarkingMarkingVisitor::Initialize();
319 }
320 
321 
322 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
323                                               bool is_marking,
324                                               bool is_compacting) {
325   if (is_marking) {
326     chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
327     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
328   } else {
329     chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
330     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
331   }
332 }
333 
334 
335 void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
336                                               bool is_marking) {
337   chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
338   if (is_marking) {
339     chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
340   } else {
341     chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
342   }
343 }
344 
345 
346 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
347     PagedSpace* space) {
348   for (Page* p : *space) {
349     SetOldSpacePageFlags(p, false, false);
350   }
351 }
352 
353 
354 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
355     NewSpace* space) {
356   for (Page* p : *space) {
357     SetNewSpacePageFlags(p, false);
358   }
359 }
360 
361 
362 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
363   DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
364   DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
365   DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
366   DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
367 
368   for (LargePage* lop : *heap_->lo_space()) {
369     SetOldSpacePageFlags(lop, false, false);
370   }
371 }
372 
373 
374 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
375   for (Page* p : *space) {
376     SetOldSpacePageFlags(p, true, is_compacting_);
377   }
378 }
379 
380 
381 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
382   for (Page* p : *space) {
383     SetNewSpacePageFlags(p, true);
384   }
385 }
386 
387 
388 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
389   ActivateIncrementalWriteBarrier(heap_->old_space());
390   ActivateIncrementalWriteBarrier(heap_->map_space());
391   ActivateIncrementalWriteBarrier(heap_->code_space());
392   ActivateIncrementalWriteBarrier(heap_->new_space());
393 
394   for (LargePage* lop : *heap_->lo_space()) {
395     SetOldSpacePageFlags(lop, true, is_compacting_);
396   }
397 }
398 
399 
400 bool IncrementalMarking::ShouldActivateEvenWithoutIdleNotification() {
401 #ifndef DEBUG
402   static const intptr_t kActivationThreshold = 8 * MB;
403 #else
404   // TODO(gc) consider setting this to some low level so that some
405   // debug tests run with incremental marking and some without.
406   static const intptr_t kActivationThreshold = 0;
407 #endif
408   // Don't switch on for very small heaps.
409   return CanBeActivated() &&
410          heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold &&
411          heap_->HeapIsFullEnoughToStartIncrementalMarking(
412              heap_->old_generation_allocation_limit());
413 }
414 
415 
416 bool IncrementalMarking::WasActivated() { return was_activated_; }
417 
418 
419 bool IncrementalMarking::CanBeActivated() {
420   // Only start incremental marking in a safe state: 1) when incremental
421   // marking is turned on, 2) when we are currently not in a GC, and
422   // 3) when we are currently not serializing or deserializing the heap.
423   return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
424          heap_->deserialization_complete() &&
425          !heap_->isolate()->serializer_enabled();
426 }
427 
428 
429 void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
430   DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
431 
432   if (!IsMarking()) {
433     // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
434     // we don't need to do anything if incremental marking is
435     // not active.
436   } else if (IsCompacting()) {
437     RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
438   } else {
439     RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
440   }
441 }
442 
443 
444 void IncrementalMarking::NotifyOfHighPromotionRate() {
445   if (IsMarking()) {
446     if (marking_speed_ < kFastMarking) {
447       if (FLAG_trace_gc) {
448         PrintIsolate(heap()->isolate(),
449                      "Increasing marking speed to %d "
450                      "due to high promotion rate\n",
451                      static_cast<int>(kFastMarking));
452       }
453       marking_speed_ = kFastMarking;
454     }
455   }
456 }
457 
458 
459 static void PatchIncrementalMarkingRecordWriteStubs(
460     Heap* heap, RecordWriteStub::Mode mode) {
461   UnseededNumberDictionary* stubs = heap->code_stubs();
462 
463   int capacity = stubs->Capacity();
464   Isolate* isolate = heap->isolate();
465   for (int i = 0; i < capacity; i++) {
466     Object* k = stubs->KeyAt(i);
467     if (stubs->IsKey(isolate, k)) {
468       uint32_t key = NumberToUint32(k);
469 
470       if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
471         Object* e = stubs->ValueAt(i);
472         if (e->IsCode()) {
473           RecordWriteStub::Patch(Code::cast(e), mode);
474         }
475       }
476     }
477   }
478 }
479 
480 
481 void IncrementalMarking::Start(const char* reason) {
482   if (FLAG_trace_incremental_marking) {
483     PrintF("[IncrementalMarking] Start (%s)\n",
484            (reason == nullptr) ? "unknown reason" : reason);
485   }
486   DCHECK(FLAG_incremental_marking);
487   DCHECK(state_ == STOPPED);
488   DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
489   DCHECK(!heap_->isolate()->serializer_enabled());
490 
491   HistogramTimerScope incremental_marking_scope(
492       heap_->isolate()->counters()->gc_incremental_marking_start());
493   TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
494   ResetStepCounters();
495 
496   was_activated_ = true;
497 
498   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
499     StartMarking();
500   } else {
501     if (FLAG_trace_incremental_marking) {
502       PrintF("[IncrementalMarking] Start sweeping.\n");
503     }
504     state_ = SWEEPING;
505   }
506 
507   heap_->new_space()->AddAllocationObserver(&observer_);
508 
509   incremental_marking_job()->Start(heap_);
510 }
511 
512 
513 void IncrementalMarking::StartMarking() {
514   if (heap_->isolate()->serializer_enabled()) {
515     // Black allocation currently starts when we start incremental marking,
516     // but we cannot enable black allocation while deserializing. Hence, we
517     // have to delay the start of incremental marking in that case.
518     if (FLAG_trace_incremental_marking) {
519       PrintF("[IncrementalMarking] Start delayed - serializer\n");
520     }
521     return;
522   }
523   if (FLAG_trace_incremental_marking) {
524     PrintF("[IncrementalMarking] Start marking\n");
525   }
526 
527   is_compacting_ = !FLAG_never_compact &&
528                    heap_->mark_compact_collector()->StartCompaction(
529                        MarkCompactCollector::INCREMENTAL_COMPACTION);
530 
531   state_ = MARKING;
532 
533   if (heap_->UsingEmbedderHeapTracer()) {
534     heap_->mark_compact_collector()->embedder_heap_tracer()->TracePrologue();
535   }
536 
537   RecordWriteStub::Mode mode = is_compacting_
538                                    ? RecordWriteStub::INCREMENTAL_COMPACTION
539                                    : RecordWriteStub::INCREMENTAL;
540 
541   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
542 
543   heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize(
544       MarkCompactCollector::kMaxMarkingDequeSize);
545 
546   ActivateIncrementalWriteBarrier();
547 
548 // Marking bits are cleared by the sweeper.
549 #ifdef VERIFY_HEAP
550   if (FLAG_verify_heap) {
551     heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
552   }
553 #endif
554 
555   heap_->CompletelyClearInstanceofCache();
556   heap_->isolate()->compilation_cache()->MarkCompactPrologue();
557 
558   // Mark strong roots grey.
559   IncrementalMarkingRootMarkingVisitor visitor(this);
560   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
561 
562   // Ready to start incremental marking.
563   if (FLAG_trace_incremental_marking) {
564     PrintF("[IncrementalMarking] Running\n");
565   }
566 }
567 
568 void IncrementalMarking::StartBlackAllocation() {
569   DCHECK(FLAG_black_allocation);
570   DCHECK(IsMarking());
571   black_allocation_ = true;
572   OldSpace* old_space = heap()->old_space();
573   old_space->EmptyAllocationInfo();
574   old_space->free_list()->Reset();
575   if (FLAG_trace_incremental_marking) {
576     PrintF("[IncrementalMarking] Black allocation started\n");
577   }
578 }
579 
580 void IncrementalMarking::FinishBlackAllocation() {
581   if (black_allocation_) {
582     black_allocation_ = false;
583     if (FLAG_trace_incremental_marking) {
584       PrintF("[IncrementalMarking] Black allocation finished\n");
585     }
586   }
587 }
588 
589 void IncrementalMarking::MarkRoots() {
590   DCHECK(!finalize_marking_completed_);
591   DCHECK(IsMarking());
592 
593   IncrementalMarkingRootMarkingVisitor visitor(this);
594   heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
595 }
596 
597 
598 void IncrementalMarking::MarkObjectGroups() {
599   DCHECK(!heap_->UsingEmbedderHeapTracer());
600   DCHECK(!finalize_marking_completed_);
601   DCHECK(IsMarking());
602 
603   IncrementalMarkingRootMarkingVisitor visitor(this);
604   heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkObject);
605   heap_->isolate()->global_handles()->IterateObjectGroups(
606       &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
607   heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
608   heap_->isolate()->global_handles()->RemoveObjectGroups();
609 }
610 
611 
612 void IncrementalMarking::ProcessWeakCells() {
613   DCHECK(!finalize_marking_completed_);
614   DCHECK(IsMarking());
615 
616   Object* the_hole_value = heap()->the_hole_value();
617   Object* weak_cell_obj = heap()->encountered_weak_cells();
618   Object* weak_cell_head = Smi::FromInt(0);
619   WeakCell* prev_weak_cell_obj = NULL;
620   while (weak_cell_obj != Smi::FromInt(0)) {
621     WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
622     // We do not insert cleared weak cells into the list, so the value
623     // cannot be a Smi here.
624     HeapObject* value = HeapObject::cast(weak_cell->value());
625     // Remove weak cells with live objects from the list; they do not need
626     // clearing.
627     if (MarkCompactCollector::IsMarked(value)) {
628       // Record slot, if value is pointing to an evacuation candidate.
629       Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
630       heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
631       // Remove entry somewhere after top.
632       if (prev_weak_cell_obj != NULL) {
633         prev_weak_cell_obj->set_next(weak_cell->next());
634       }
635       weak_cell_obj = weak_cell->next();
636       weak_cell->clear_next(the_hole_value);
637     } else {
638       if (weak_cell_head == Smi::FromInt(0)) {
639         weak_cell_head = weak_cell;
640       }
641       prev_weak_cell_obj = weak_cell;
642       weak_cell_obj = weak_cell->next();
643     }
644   }
645   // Top may have changed.
646   heap()->set_encountered_weak_cells(weak_cell_head);
647 }
648 
649 
650 bool ShouldRetainMap(Map* map, int age) {
651   if (age == 0) {
652     // The map has aged. Do not retain this map.
653     return false;
654   }
655   Object* constructor = map->GetConstructor();
656   if (!constructor->IsHeapObject() ||
657       Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(constructor)))) {
658     // The constructor is dead; no new objects with this map can
659     // be created. Do not retain this map.
660     return false;
661   }
662   return true;
663 }
664 
665 
666 void IncrementalMarking::RetainMaps() {
667   // Do not retain dead maps if the flag disables it, or if there is
668   // - memory pressure (reduce_memory_footprint_),
669   // - a GC requested by tests or dev-tools (abort_incremental_marking_).
670   bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
671                                    heap()->ShouldAbortIncrementalMarking() ||
672                                    FLAG_retain_maps_for_n_gc == 0;
673   ArrayList* retained_maps = heap()->retained_maps();
674   int length = retained_maps->Length();
675   // The number_of_disposed_maps separates maps in the retained_maps
676   // array that were created before and after context disposal.
677   // We do not age and retain disposed maps to avoid memory leaks.
678   int number_of_disposed_maps = heap()->number_of_disposed_maps_;
679   for (int i = 0; i < length; i += 2) {
680     DCHECK(retained_maps->Get(i)->IsWeakCell());
681     WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
682     if (cell->cleared()) continue;
683     int age = Smi::cast(retained_maps->Get(i + 1))->value();
684     int new_age;
685     Map* map = Map::cast(cell->value());
686     MarkBit map_mark = Marking::MarkBitFrom(map);
687     if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
688         Marking::IsWhite(map_mark)) {
689       if (ShouldRetainMap(map, age)) {
690         MarkObject(heap(), map);
691       }
692       Object* prototype = map->prototype();
693       if (age > 0 && prototype->IsHeapObject() &&
694           Marking::IsWhite(Marking::MarkBitFrom(HeapObject::cast(prototype)))) {
695         // The prototype is not marked, age the map.
696         new_age = age - 1;
697       } else {
698         // The prototype and the constructor are marked; this map keeps only
699         // the transition tree alive, not JSObjects. Do not age the map.
700         new_age = age;
701       }
702     } else {
703       new_age = FLAG_retain_maps_for_n_gc;
704     }
705     // Compact the array and update the age.
706     if (new_age != age) {
707       retained_maps->Set(i + 1, Smi::FromInt(new_age));
708     }
709   }
710 }
711 
712 
713 void IncrementalMarking::FinalizeIncrementally() {
714   DCHECK(!finalize_marking_completed_);
715   DCHECK(IsMarking());
716 
717   double start = heap_->MonotonicallyIncreasingTimeInMs();
718 
719   int old_marking_deque_top =
720       heap_->mark_compact_collector()->marking_deque()->top();
721 
722   // After finishing incremental marking, we try to discover all unmarked
723   // objects to reduce the marking load in the final pause.
724   // 1) We scan and mark the roots again to find all changes to the root set.
725   // 2) We mark the object groups.
726   // 3) Age and retain maps embedded in optimized code.
727   // 4) Remove weak cell with live values from the list of weak cells, they
728   // do not need processing during GC.
729   MarkRoots();
730   if (!heap_->UsingEmbedderHeapTracer()) {
731     MarkObjectGroups();
732   }
733   if (incremental_marking_finalization_rounds_ == 0) {
734     // Map retaining is needed for performance, not correctness,
735     // so we can do it only once at the beginning of the finalization.
736     RetainMaps();
737   }
738   ProcessWeakCells();
739 
740   int marking_progress =
741       abs(old_marking_deque_top -
742           heap_->mark_compact_collector()->marking_deque()->top());
743 
744   double end = heap_->MonotonicallyIncreasingTimeInMs();
745   double delta = end - start;
746   heap_->tracer()->AddMarkingTime(delta);
747   heap_->tracer()->AddIncrementalMarkingFinalizationStep(delta);
748   if (FLAG_trace_incremental_marking) {
749     PrintF(
750         "[IncrementalMarking] Finalize incrementally round %d, "
751         "spent %d ms, marking progress %d.\n",
752         incremental_marking_finalization_rounds_, static_cast<int>(delta),
753         marking_progress);
754   }
755 
756   ++incremental_marking_finalization_rounds_;
757   if ((incremental_marking_finalization_rounds_ >=
758        FLAG_max_incremental_marking_finalization_rounds) ||
759       (marking_progress <
760        FLAG_min_progress_during_incremental_marking_finalization)) {
761     finalize_marking_completed_ = true;
762   }
763 
764   if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
765       !black_allocation_) {
766     // TODO(hpayer): Move to an earlier point as soon as we make faster marking
767     // progress.
768     StartBlackAllocation();
769   }
770 }
771 
772 
773 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
774   if (!IsMarking()) return;
775 
776   MarkingDeque* marking_deque =
777       heap_->mark_compact_collector()->marking_deque();
778   int current = marking_deque->bottom();
779   int mask = marking_deque->mask();
780   int limit = marking_deque->top();
781   HeapObject** array = marking_deque->array();
782   int new_top = current;
783 
784   Map* filler_map = heap_->one_pointer_filler_map();
785 
786   while (current != limit) {
787     HeapObject* obj = array[current];
788     DCHECK(obj->IsHeapObject());
789     current = ((current + 1) & mask);
790     // Only pointers to from space have to be updated.
791     if (heap_->InFromSpace(obj)) {
792       MapWord map_word = obj->map_word();
793       // There may be objects on the marking deque that do not exist anymore,
794       // e.g. left trimmed objects or objects from the root set (frames).
795       // If these object are dead at scavenging time, their marking deque
796       // If these objects are dead at scavenging time, their marking deque
797       // them.
798       if (map_word.IsForwardingAddress()) {
799         HeapObject* dest = map_word.ToForwardingAddress();
800         if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
801           continue;
802         array[new_top] = dest;
803         new_top = ((new_top + 1) & mask);
804         DCHECK(new_top != marking_deque->bottom());
805 #ifdef DEBUG
806         MarkBit mark_bit = Marking::MarkBitFrom(obj);
807         DCHECK(Marking::IsGrey(mark_bit) ||
808                (obj->IsFiller() && Marking::IsWhite(mark_bit)));
809 #endif
810       }
811     } else if (obj->map() != filler_map) {
812       // Skip one word filler objects that appear on the
813       // stack when we perform in place array shift.
814       array[new_top] = obj;
815       new_top = ((new_top + 1) & mask);
816       DCHECK(new_top != marking_deque->bottom());
817 #ifdef DEBUG
818       MarkBit mark_bit = Marking::MarkBitFrom(obj);
819       MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
820       DCHECK(Marking::IsGrey(mark_bit) ||
821              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
822              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
823               Marking::IsBlack(mark_bit)));
824 #endif
825     }
826   }
827   marking_deque->set_top(new_top);
828 }
829 
830 
831 void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
832   MarkObject(heap_, map);
833 
834   IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
835 
836   MarkBit mark_bit = Marking::MarkBitFrom(obj);
837 #if ENABLE_SLOW_DCHECKS
838   MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
839   SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
840               (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
841               (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
842                Marking::IsBlack(mark_bit)));
843 #endif
844   MarkBlackOrKeepBlack(obj, mark_bit, size);
845 }
846 
847 
848 void IncrementalMarking::MarkObject(Heap* heap, HeapObject* obj) {
849   MarkBit mark_bit = Marking::MarkBitFrom(obj);
850   if (Marking::IsWhite(mark_bit)) {
851     heap->incremental_marking()->WhiteToGreyAndPush(obj, mark_bit);
852   }
853 }
854 
855 
856 intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
857   intptr_t bytes_processed = 0;
858   Map* one_pointer_filler_map = heap_->one_pointer_filler_map();
859   Map* two_pointer_filler_map = heap_->two_pointer_filler_map();
860   MarkingDeque* marking_deque =
861       heap_->mark_compact_collector()->marking_deque();
862   while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
863     HeapObject* obj = marking_deque->Pop();
864 
865     // Explicitly skip one and two word fillers. Incremental markbit patterns
866     // are correct only for objects that occupy at least two words.
867     // Moreover, slots filtering for left-trimmed arrays works only when
868     // the distance between the old array start and the new array start
869     // is greater than two if both starts are marked.
870     Map* map = obj->map();
871     if (map == one_pointer_filler_map || map == two_pointer_filler_map)
872       continue;
873 
874     int size = obj->SizeFromMap(map);
875     unscanned_bytes_of_large_object_ = 0;
876     VisitObject(map, obj, size);
877     bytes_processed += size - unscanned_bytes_of_large_object_;
878   }
879   return bytes_processed;
880 }
881 
882 
883 void IncrementalMarking::ProcessMarkingDeque() {
884   Map* filler_map = heap_->one_pointer_filler_map();
885   MarkingDeque* marking_deque =
886       heap_->mark_compact_collector()->marking_deque();
887   while (!marking_deque->IsEmpty()) {
888     HeapObject* obj = marking_deque->Pop();
889 
890     // Explicitly skip one word fillers. Incremental markbit patterns are
891     // correct only for objects that occupy at least two words.
892     Map* map = obj->map();
893     if (map == filler_map) continue;
894 
895     VisitObject(map, obj, obj->SizeFromMap(map));
896   }
897 }
898 
899 
900 void IncrementalMarking::Hurry() {
901   // A scavenge may have pushed new objects on the marking deque (due to black
902   // allocation) even in COMPLETE state. This may happen if scavenges are
903   // forced e.g. in tests. It should not happen when COMPLETE was set when
904   // incremental marking finished and a regular GC was triggered after that
905   // because should_hurry_ will force a full GC.
906   if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
907     double start = 0.0;
908     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
909       start = heap_->MonotonicallyIncreasingTimeInMs();
910       if (FLAG_trace_incremental_marking) {
911         PrintF("[IncrementalMarking] Hurry\n");
912       }
913     }
914     // TODO(gc) hurry can mark objects it encounters black as mutator
915     // was stopped.
916     ProcessMarkingDeque();
917     state_ = COMPLETE;
918     if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
919       double end = heap_->MonotonicallyIncreasingTimeInMs();
920       double delta = end - start;
921       heap_->tracer()->AddMarkingTime(delta);
922       if (FLAG_trace_incremental_marking) {
923         PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
924                static_cast<int>(delta));
925       }
926     }
927   }
928 
929   Object* context = heap_->native_contexts_list();
930   while (!context->IsUndefined(heap_->isolate())) {
931     // GC can happen when the context is not fully initialized,
932     // so the cache can be undefined.
933     HeapObject* cache = HeapObject::cast(
934         Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
935     if (!cache->IsUndefined(heap_->isolate())) {
936       MarkBit mark_bit = Marking::MarkBitFrom(cache);
937       if (Marking::IsGrey(mark_bit)) {
938         Marking::GreyToBlack(mark_bit);
939         MemoryChunk::IncrementLiveBytesFromGC(cache, cache->Size());
940       }
941     }
942     context = Context::cast(context)->next_context_link();
943   }
944 }
945 
946 
947 void IncrementalMarking::Stop() {
948   if (IsStopped()) return;
949   if (FLAG_trace_incremental_marking) {
950     PrintF("[IncrementalMarking] Stopping.\n");
951   }
952 
953   heap_->new_space()->RemoveAllocationObserver(&observer_);
954   IncrementalMarking::set_should_hurry(false);
955   ResetStepCounters();
956   if (IsMarking()) {
957     PatchIncrementalMarkingRecordWriteStubs(heap_,
958                                             RecordWriteStub::STORE_BUFFER_ONLY);
959     DeactivateIncrementalWriteBarrier();
960   }
961   heap_->isolate()->stack_guard()->ClearGC();
962   state_ = STOPPED;
963   is_compacting_ = false;
964   FinishBlackAllocation();
965 }
966 
967 
968 void IncrementalMarking::Finalize() {
969   Hurry();
970   Stop();
971 }
972 
973 
974 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
975   DCHECK(!finalize_marking_completed_);
976   if (FLAG_trace_incremental_marking) {
977     PrintF(
978         "[IncrementalMarking] requesting finalization of incremental "
979         "marking.\n");
980   }
981   request_type_ = FINALIZATION;
982   if (action == GC_VIA_STACK_GUARD) {
983     heap_->isolate()->stack_guard()->RequestGC();
984   }
985 }
986 
987 
988 void IncrementalMarking::MarkingComplete(CompletionAction action) {
989   state_ = COMPLETE;
990   // We will set the stack guard to request a GC now.  This will mean the rest
991   // of the GC gets performed as soon as possible (we can't do a GC here in a
992   // record-write context).  If a few things get allocated between now and then,
993   // that shouldn't make us do a scavenge and keep being incremental, so we set
994   // the should-hurry flag to indicate that there can't be much work left to do.
995   set_should_hurry(true);
996   if (FLAG_trace_incremental_marking) {
997     PrintF("[IncrementalMarking] Complete (normal).\n");
998   }
999   request_type_ = COMPLETE_MARKING;
1000   if (action == GC_VIA_STACK_GUARD) {
1001     heap_->isolate()->stack_guard()->RequestGC();
1002   }
1003 }
1004 
1005 
1006 void IncrementalMarking::Epilogue() {
1007   was_activated_ = false;
1008   finalize_marking_completed_ = false;
1009   incremental_marking_finalization_rounds_ = 0;
1010 }
1011 
1012 double IncrementalMarking::AdvanceIncrementalMarking(
1013     double deadline_in_ms, IncrementalMarking::StepActions step_actions) {
1014   DCHECK(!IsStopped());
1015 
1016   intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
1017       GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs,
1018       heap()
1019           ->tracer()
1020           ->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
1021   double remaining_time_in_ms = 0.0;
1022   intptr_t bytes_processed = 0;
1023 
1024   do {
1025     bytes_processed =
1026         Step(step_size_in_bytes, step_actions.completion_action,
1027              step_actions.force_marking, step_actions.force_completion);
1028     remaining_time_in_ms =
1029         deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
1030   } while (bytes_processed > 0 &&
1031            remaining_time_in_ms >=
1032                2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
1033            !IsComplete() &&
1034            !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
1035   return remaining_time_in_ms;
1036 }
1037 
1038 
1039 void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
1040   if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
1041     heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
1042                                     "old space step");
1043   } else {
1044     Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
1045   }
1046 }
1047 
1048 
1049 void IncrementalMarking::SpeedUp() {
1050   bool speed_up = false;
1051 
1052   if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
1053     if (FLAG_trace_incremental_marking) {
1054       PrintIsolate(heap()->isolate(), "Speed up marking after %d steps\n",
1055                    static_cast<int>(kMarkingSpeedAccellerationInterval));
1056     }
1057     speed_up = true;
1058   }
1059 
1060   bool space_left_is_very_small =
1061       (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
1062 
1063   bool only_1_nth_of_space_that_was_available_still_left =
1064       (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
1065        old_generation_space_available_at_start_of_incremental_);
1066 
1067   if (space_left_is_very_small ||
1068       only_1_nth_of_space_that_was_available_still_left) {
1069     if (FLAG_trace_incremental_marking)
1070       PrintIsolate(heap()->isolate(),
1071                    "Speed up marking because of low space left\n");
1072     speed_up = true;
1073   }
1074 
1075   bool size_of_old_space_multiplied_by_n_during_marking =
1076       (heap_->PromotedTotalSize() >
1077        (marking_speed_ + 1) *
1078            old_generation_space_used_at_start_of_incremental_);
1079   if (size_of_old_space_multiplied_by_n_during_marking) {
1080     speed_up = true;
1081     if (FLAG_trace_incremental_marking) {
1082       PrintIsolate(heap()->isolate(),
1083                    "Speed up marking because of heap size increase\n");
1084     }
1085   }
1086 
1087   int64_t promoted_during_marking =
1088       heap_->PromotedTotalSize() -
1089       old_generation_space_used_at_start_of_incremental_;
1090   intptr_t delay = marking_speed_ * MB;
1091   intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
1092 
1093   // We try to scan at least twice as fast as we allocate.
1094   if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
1095     if (FLAG_trace_incremental_marking) {
1096       PrintIsolate(heap()->isolate(),
1097                    "Speed up marking because marker was not keeping up\n");
1098     }
1099     speed_up = true;
1100   }
1101 
1102   if (speed_up) {
1103     if (state_ != MARKING) {
1104       if (FLAG_trace_incremental_marking) {
1105         PrintIsolate(heap()->isolate(),
1106                      "Postponing speeding up marking until marking starts\n");
1107       }
1108     } else {
1109       marking_speed_ += kMarkingSpeedAccelleration;
1110       marking_speed_ = static_cast<int>(
1111           Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
1112       if (FLAG_trace_incremental_marking) {
1113         PrintIsolate(heap()->isolate(), "Marking speed increased to %d\n",
1114                      marking_speed_);
1115       }
1116     }
1117   }
1118 }
1119 
1120 void IncrementalMarking::FinalizeSweeping() {
1121   DCHECK(state_ == SWEEPING);
1122   if (heap_->mark_compact_collector()->sweeping_in_progress() &&
1123       (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
1124        !FLAG_concurrent_sweeping)) {
1125     heap_->mark_compact_collector()->EnsureSweepingCompleted();
1126   }
1127   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
1128     bytes_scanned_ = 0;
1129     StartMarking();
1130   }
1131 }
1132 
1133 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
1134                                   CompletionAction action,
1135                                   ForceMarkingAction marking,
1136                                   ForceCompletionAction completion) {
1137   DCHECK(allocated_bytes >= 0);
1138 
1139   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
1140       (state_ != SWEEPING && state_ != MARKING)) {
1141     return 0;
1142   }
1143 
1144   allocated_ += allocated_bytes;
1145 
1146   if (marking == DO_NOT_FORCE_MARKING && allocated_ < kAllocatedThreshold &&
1147       write_barriers_invoked_since_last_step_ <
1148           kWriteBarriersInvokedThreshold) {
1149     return 0;
1150   }
1151 
1152   // If an idle notification happened recently, we delay marking steps.
1153   if (marking == DO_NOT_FORCE_MARKING &&
1154       heap_->RecentIdleNotificationHappened()) {
1155     return 0;
1156   }
1157 
1158   intptr_t bytes_processed = 0;
1159   {
1160     HistogramTimerScope incremental_marking_scope(
1161         heap_->isolate()->counters()->gc_incremental_marking());
1162     TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
1163     double start = heap_->MonotonicallyIncreasingTimeInMs();
1164 
1165     // The marking speed is driven either by the allocation rate or by the rate
1166     // at which we are having to check the color of objects in the write
1167     // barrier.
1168     // It is possible for a tight non-allocating loop to run a lot of write
1169     // barriers before we get here and check them (marking can only take place
1170     // on allocation), so to reduce the lumpiness we don't use the write
1171     // barriers invoked since last step directly to determine the amount of
1172     // work to do.
1173     intptr_t bytes_to_process =
1174         marking_speed_ *
1175         Max(allocated_, write_barriers_invoked_since_last_step_);
1176     allocated_ = 0;
1177     write_barriers_invoked_since_last_step_ = 0;
1178 
1179     bytes_scanned_ += bytes_to_process;
1180 
1181     // TODO(hpayer): Do not account for sweeping finalization while marking.
1182     if (state_ == SWEEPING) {
1183       FinalizeSweeping();
1184     }
1185 
1186     if (state_ == MARKING) {
1187       bytes_processed = ProcessMarkingDeque(bytes_to_process);
1188       if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
1189         if (completion == FORCE_COMPLETION ||
1190             IsIdleMarkingDelayCounterLimitReached()) {
1191           if (!finalize_marking_completed_) {
1192             FinalizeMarking(action);
1193           } else {
1194             MarkingComplete(action);
1195           }
1196         } else {
1197           IncrementIdleMarkingDelayCounter();
1198         }
1199       }
1200     }
1201 
1202     steps_count_++;
1203 
1204     // Speed up marking if we are marking too slow or if we are almost done
1205     // with marking.
1206     SpeedUp();
1207 
1208     double end = heap_->MonotonicallyIncreasingTimeInMs();
1209     double duration = (end - start);
1210     // Note that we report zero bytes here when sweeping was in progress or
1211     // when we just started incremental marking. In these cases we did not
1212     // process the marking deque.
1213     heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
1214   }
1215   return bytes_processed;
1216 }
1217 
1218 
1219 void IncrementalMarking::ResetStepCounters() {
1220   steps_count_ = 0;
1221   old_generation_space_available_at_start_of_incremental_ =
1222       SpaceLeftInOldSpace();
1223   old_generation_space_used_at_start_of_incremental_ =
1224       heap_->PromotedTotalSize();
1225   bytes_rescanned_ = 0;
1226   marking_speed_ = kInitialMarkingSpeed;
1227   bytes_scanned_ = 0;
1228   write_barriers_invoked_since_last_step_ = 0;
1229 }
1230 
1231 
1232 int64_t IncrementalMarking::SpaceLeftInOldSpace() {
1233   return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
1234 }
1235 
1236 
1237 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
1238   return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
1239 }
1240 
1241 
1242 void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
1243   idle_marking_delay_counter_++;
1244 }
1245 
1246 
1247 void IncrementalMarking::ClearIdleMarkingDelayCounter() {
1248   idle_marking_delay_counter_ = 0;
1249 }
1250 }  // namespace internal
1251 }  // namespace v8
1252