// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
      state_(STOPPED),
      initial_old_generation_size_(0),
      bytes_marked_ahead_of_schedule_(0),
      unscanned_bytes_of_large_object_(0),
      idle_marking_delay_counter_(0),
      incremental_marking_finalization_rounds_(0),
      is_compacting_(false),
      should_hurry_(false),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
      trace_wrappers_toggle_(false),
      request_type_(NONE),
      new_generation_observer_(*this, kAllocatedThreshold),
      old_generation_observer_(*this, kAllocatedThreshold) {}

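// Write-barrier slow path helper: if a black object now references a white
// value, the value is greyed and pushed for scanning. Returns true when the
// caller still needs to record the slot (only while compacting).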
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  DCHECK(!ObjectMarking::IsImpossible(value_heap_obj));
  DCHECK(!ObjectMarking::IsImpossible(obj));
  const bool is_black = ObjectMarking::IsBlack(obj);

  if (is_black && ObjectMarking::IsWhite(value_heap_obj)) {
    WhiteToGreyAndPush(value_heap_obj);
    RestartIfNotMarking();
  }
  return is_compacting_ && is_black;
}


void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
                                         Object* value) {
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
  marking->RecordWriteOfCodeEntry(host, slot, value);
}

void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
                                               HeapObject* value) {
  if (IsMarking()) {
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
  if (IsMarking()) {
    Code* host = heap_->isolate()
                     ->inner_pointer_to_code_cache()
                     ->GcSafeFindCodeForInnerPointer(pc);
    RelocInfo rinfo(heap_->isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
    RecordWriteIntoCode(host, &rinfo, value);
  }
}


void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
                                                    Object** slot,
                                                    Code* value) {
  if (BaseRecordWrite(host, value)) {
    DCHECK(slot != NULL);
    heap_->mark_compact_collector()->RecordCodeEntrySlot(
        host, reinterpret_cast<Address>(slot), value);
  }
}

void IncrementalMarking::RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo,
                                                 Object* value) {
  if (BaseRecordWrite(host, value)) {
    // Object is not going to be rescanned. We need to record the slot.
    heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
  }
}

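// Marks a white object grey and pushes it onto the marking deque so that its
// body is scanned by a later marking step.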
void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
  ObjectMarking::WhiteToGrey(obj);
  heap_->mark_compact_collector()->marking_deque()->Push(obj);
}


static void MarkObjectGreyDoNotEnqueue(Object* obj) {
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    ObjectMarking::AnyToGrey(heap_obj);
  }
}

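// Transfers the mark of an object that is being resized in place from its old
// start address to its new one. A grey object is re-pushed onto the marking
// deque so that the resized object gets scanned.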
void IncrementalMarking::TransferMark(Heap* heap, HeapObject* from,
                                      HeapObject* to) {
  DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
  // This is only used when resizing an object.
  DCHECK(MemoryChunk::FromAddress(from->address()) ==
         MemoryChunk::FromAddress(to->address()));

  if (!heap->incremental_marking()->IsMarking()) return;

  // If the mark doesn't move, we don't check the color of the object.
  // It doesn't matter whether the object is black, since it hasn't changed
  // size, so the adjustment to the live data count will be zero anyway.
  if (from == to) return;

  MarkBit new_mark_bit = ObjectMarking::MarkBitFrom(to);
  MarkBit old_mark_bit = ObjectMarking::MarkBitFrom(from);

#ifdef DEBUG
  Marking::ObjectColor old_color = Marking::Color(old_mark_bit);
#endif

  if (Marking::IsBlack(old_mark_bit)) {
    Marking::BlackToWhite(old_mark_bit);
    Marking::WhiteToBlack(new_mark_bit);
    return;
  } else if (Marking::IsGrey(old_mark_bit)) {
    Marking::GreyToWhite(old_mark_bit);
    Marking::WhiteToGrey(new_mark_bit);
    heap->mark_compact_collector()->marking_deque()->Push(to);
    heap->incremental_marking()->RestartIfNotMarking();
  }

#ifdef DEBUG
  Marking::ObjectColor new_color = Marking::Color(new_mark_bit);
  DCHECK(new_color == old_color);
#endif
}

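// Visitor used by incremental marking steps: it records slots for evacuation,
// greys newly discovered objects, and scans large fixed arrays in chunks
// driven by the page's progress bar.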
class IncrementalMarkingMarkingVisitor
    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
 public:
  static void Initialize() {
    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
  }

  static const int kProgressBarScanningChunk = 32 * 1024;

  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
      DCHECK(!FLAG_use_marking_progress_bar ||
             chunk->owner()->identity() == LO_SPACE);
      Heap* heap = map->GetHeap();
      // When using a progress bar for large fixed arrays, scan only a chunk
      // of the array and try to push it onto the marking deque again until it
      // is fully scanned. Fall back to scanning it through to the end in case
      // this fails because of a full deque.
      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
      int start_offset =
          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
      int end_offset =
          Min(object_size, start_offset + kProgressBarScanningChunk);
      int already_scanned_offset = start_offset;
      bool scan_until_end = false;
      do {
        VisitPointers(heap, object, HeapObject::RawField(object, start_offset),
                      HeapObject::RawField(object, end_offset));
        start_offset = end_offset;
        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
        scan_until_end =
            heap->mark_compact_collector()->marking_deque()->IsFull();
      } while (scan_until_end && start_offset < object_size);
      chunk->set_progress_bar(start_offset);
      if (start_offset < object_size) {
        if (ObjectMarking::IsGrey(object)) {
          heap->mark_compact_collector()->marking_deque()->Unshift(object);
        } else {
          DCHECK(ObjectMarking::IsBlack(object));
          heap->mark_compact_collector()->UnshiftBlack(object);
        }
        heap->incremental_marking()->NotifyIncompleteScanOfObject(
            object_size - (start_offset - already_scanned_offset));
      }
    } else {
      FixedArrayVisitor::Visit(map, object);
    }
  }

  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
    Context* context = Context::cast(object);

    // We will mark cache black with a separate pass when we finish marking.
    // Note that GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
    if (!cache->IsUndefined(map->GetIsolate())) {
      MarkObjectGreyDoNotEnqueue(cache);
    }
    VisitNativeContext(map, context);
  }

  INLINE(static void VisitPointer(Heap* heap, HeapObject* object, Object** p)) {
    Object* target = *p;
    if (target->IsHeapObject()) {
      heap->mark_compact_collector()->RecordSlot(object, p, target);
      MarkObject(heap, target);
    }
  }

  INLINE(static void VisitPointers(Heap* heap, HeapObject* object,
                                   Object** start, Object** end)) {
    for (Object** p = start; p < end; p++) {
      Object* target = *p;
      if (target->IsHeapObject()) {
        heap->mark_compact_collector()->RecordSlot(object, p, target);
        MarkObject(heap, target);
      }
    }
  }

  // Marks the object grey and pushes it on the marking stack.
  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
    IncrementalMarking::MarkGrey(heap, HeapObject::cast(obj));
  }

  // Marks the object black without pushing it on the marking stack.
  // Returns true if object needed marking and false otherwise.
  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
    HeapObject* heap_object = HeapObject::cast(obj);
    if (ObjectMarking::IsWhite(heap_object)) {
      ObjectMarking::WhiteToBlack(heap_object);
      return true;
    }
    return false;
  }
};

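// Re-visits the body of an already black object so that any references it
// currently holds are marked as well.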
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
  if (IsMarking() && ObjectMarking::IsBlack(object)) {
    Page* page = Page::FromAddress(object->address());
    if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
      // IterateBlackObject requires us to visit the whole object.
      page->ResetProgressBar();
    }
    Map* map = object->map();
    MarkGrey(heap_, map);
    IncrementalMarkingMarkingVisitor::IterateBody(map, object);
  }
}

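// Visitor that greys all strong roots; used when marking starts and again
// during finalization.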
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
 public:
  explicit IncrementalMarkingRootMarkingVisitor(
      IncrementalMarking* incremental_marking)
      : heap_(incremental_marking->heap()) {}

  void VisitPointer(Object** p) override { MarkObjectByPointer(p); }

  void VisitPointers(Object** start, Object** end) override {
    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
  }

 private:
  void MarkObjectByPointer(Object** p) {
    Object* obj = *p;
    if (!obj->IsHeapObject()) return;

    IncrementalMarking::MarkGrey(heap_, HeapObject::cast(obj));
  }

  Heap* heap_;
};


void IncrementalMarking::Initialize() {
  IncrementalMarkingMarkingVisitor::Initialize();
}


void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking,
                                              bool is_compacting) {
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::SetNewSpacePageFlags(MemoryChunk* chunk,
                                              bool is_marking) {
  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  } else {
    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, false, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
    NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, false);
  }
}


void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, false, false);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
  for (Page* p : *space) {
    SetOldSpacePageFlags(p, true, is_compacting_);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
  for (Page* p : *space) {
    SetNewSpacePageFlags(p, true);
  }
}


void IncrementalMarking::ActivateIncrementalWriteBarrier() {
  ActivateIncrementalWriteBarrier(heap_->old_space());
  ActivateIncrementalWriteBarrier(heap_->map_space());
  ActivateIncrementalWriteBarrier(heap_->code_space());
  ActivateIncrementalWriteBarrier(heap_->new_space());

  for (LargePage* lop : *heap_->lo_space()) {
    SetOldSpacePageFlags(lop, true, is_compacting_);
  }
}


bool IncrementalMarking::WasActivated() { return was_activated_; }


bool IncrementalMarking::CanBeActivated() {
  // Only start incremental marking in a safe state: 1) when incremental
  // marking is turned on, 2) when we are currently not in a GC, and
  // 3) when we are currently not serializing or deserializing the heap.
  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
         heap_->deserialization_complete() &&
         !heap_->isolate()->serializer_enabled();
}


void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);

  if (!IsMarking()) {
    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


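// Switches every RecordWrite stub in the code stub cache to the given mode
// (store-buffer-only, incremental, or incremental compaction).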
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  Isolate* isolate = heap->isolate();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(isolate, k)) {
      uint32_t key = NumberToUint32(k);

      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
        Object* e = stubs->ValueAt(i);
        if (e->IsCode()) {
          RecordWriteStub::Patch(Code::cast(e), mode);
        }
      }
    }
  }
}

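// Kicks off an incremental marking cycle for the given reason: resets the
// per-cycle counters, installs the allocation observers, and either starts
// marking immediately or waits for concurrent sweeping to finish first.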
void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
        "slack %dMB\n",
        Heap::GarbageCollectionReasonToString(gc_reason),
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_limit_mb - old_generation_size_mb));
  }
  DCHECK(FLAG_incremental_marking);
  DCHECK(state_ == STOPPED);
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
  heap_->tracer()->NotifyIncrementalMarkingStart();

  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ = 0;
  bytes_marked_ahead_of_schedule_ = 0;
  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->AddAllocationObserver(&new_generation_observer_);
    } else {
      space->AddAllocationObserver(&old_generation_observer_);
    }
  }

  incremental_marking_job()->Start(heap_);
}


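// Transitions into the MARKING state: decides whether to compact, patches the
// RecordWrite stubs, activates the incremental write barrier, and greys the
// strong roots.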
void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start delayed - serializer\n");
    }
    return;
  }
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Start marking\n");
  }

  is_compacting_ =
      !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();

  state_ = MARKING;

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  RecordWriteStub::Mode mode = is_compacting_
                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
                                   : RecordWriteStub::INCREMENTAL;

  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);

  heap_->mark_compact_collector()->marking_deque()->StartUsing();

  ActivateIncrementalWriteBarrier();

// Marking bits are cleared by the sweeper.
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
  }
#endif

  heap_->CompletelyClearInstanceofCache();
  heap_->isolate()->compilation_cache()->MarkCompactPrologue();

  // Mark strong roots grey.
  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);

  // Ready to start incremental marking.
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
  }
}

void IncrementalMarking::StartBlackAllocation() {
  DCHECK(FLAG_black_allocation);
  DCHECK(IsMarking());
  black_allocation_ = true;
  heap()->old_space()->MarkAllocationInfoBlack();
  heap()->map_space()->MarkAllocationInfoBlack();
  heap()->code_space()->MarkAllocationInfoBlack();
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation started\n");
  }
}

void IncrementalMarking::FinishBlackAllocation() {
  if (black_allocation_) {
    black_allocation_ = false;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Black allocation finished\n");
    }
  }
}

void IncrementalMarking::AbortBlackAllocation() {
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Black allocation aborted\n");
  }
}

void IncrementalMarking::MarkRoots() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
}


void IncrementalMarking::MarkObjectGroups() {
  TRACE_GC(heap_->tracer(),
           GCTracer::Scope::MC_INCREMENTAL_FINALIZE_OBJECT_GROUPING);

  DCHECK(!heap_->local_embedder_heap_tracer()->InUse());
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  IncrementalMarkingRootMarkingVisitor visitor(this);
  heap_->mark_compact_collector()->MarkImplicitRefGroups(&MarkGrey);
  heap_->isolate()->global_handles()->IterateObjectGroups(
      &visitor, &MarkCompactCollector::IsUnmarkedHeapObjectWithHeap);
  heap_->isolate()->global_handles()->RemoveImplicitRefGroups();
  heap_->isolate()->global_handles()->RemoveObjectGroups();
}


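// Removes weak cells whose values are already marked live from the list of
// encountered weak cells; such cells do not need clearing, only their value
// slots are recorded for evacuation.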
void IncrementalMarking::ProcessWeakCells() {
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  Object* the_hole_value = heap()->the_hole_value();
  Object* weak_cell_obj = heap()->encountered_weak_cells();
  Object* weak_cell_head = Smi::kZero;
  WeakCell* prev_weak_cell_obj = NULL;
  while (weak_cell_obj != Smi::kZero) {
    WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
    // We do not insert cleared weak cells into the list, so the value
    // cannot be a Smi here.
    HeapObject* value = HeapObject::cast(weak_cell->value());
    // Remove weak cells with live objects from the list, they do not need
    // clearing.
    if (ObjectMarking::IsBlackOrGrey(value)) {
      // Record slot, if value is pointing to an evacuation candidate.
      Object** slot = HeapObject::RawField(weak_cell, WeakCell::kValueOffset);
      heap_->mark_compact_collector()->RecordSlot(weak_cell, slot, *slot);
      // Remove entry somewhere after top.
      if (prev_weak_cell_obj != NULL) {
        prev_weak_cell_obj->set_next(weak_cell->next());
      }
      weak_cell_obj = weak_cell->next();
      weak_cell->clear_next(the_hole_value);
    } else {
      if (weak_cell_head == Smi::kZero) {
        weak_cell_head = weak_cell;
      }
      prev_weak_cell_obj = weak_cell;
      weak_cell_obj = weak_cell->next();
    }
  }
  // Top may have changed.
  heap()->set_encountered_weak_cells(weak_cell_head);
}


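// A map is worth retaining only while it still has age left and its
// constructor is reachable; otherwise no new instances can be created and the
// map may be allowed to die.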
bool ShouldRetainMap(Map* map, int age) {
  if (age == 0) {
    // The map has aged. Do not retain this map.
    return false;
  }
  Object* constructor = map->GetConstructor();
  if (!constructor->IsHeapObject() ||
      ObjectMarking::IsWhite(HeapObject::cast(constructor))) {
    // The constructor is dead, no new objects with this map can
    // be created. Do not retain this map.
    return false;
  }
  return true;
}


void IncrementalMarking::RetainMaps() {
  // Do not retain dead maps if the flag disables it, or if there is
  // - memory pressure (reduce_memory_footprint_), or
  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
                                   heap()->ShouldAbortIncrementalMarking() ||
                                   FLAG_retain_maps_for_n_gc == 0;
  ArrayList* retained_maps = heap()->retained_maps();
  int length = retained_maps->Length();
  // The number_of_disposed_maps separates maps in the retained_maps
  // array that were created before and after context disposal.
  // We do not age and retain disposed maps to avoid memory leaks.
  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
  for (int i = 0; i < length; i += 2) {
    DCHECK(retained_maps->Get(i)->IsWeakCell());
    WeakCell* cell = WeakCell::cast(retained_maps->Get(i));
    if (cell->cleared()) continue;
    int age = Smi::cast(retained_maps->Get(i + 1))->value();
    int new_age;
    Map* map = Map::cast(cell->value());
    if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
        ObjectMarking::IsWhite(map)) {
      if (ShouldRetainMap(map, age)) {
        MarkGrey(heap(), map);
      }
      Object* prototype = map->prototype();
      if (age > 0 && prototype->IsHeapObject() &&
          ObjectMarking::IsWhite(HeapObject::cast(prototype))) {
        // The prototype is not marked, age the map.
        new_age = age - 1;
      } else {
        // The prototype and the constructor are marked; this map keeps only
        // the transition tree alive, not JSObjects. Do not age the map.
        new_age = age;
      }
    } else {
      new_age = FLAG_retain_maps_for_n_gc;
    }
    // Compact the array and update the age.
    if (new_age != age) {
      retained_maps->Set(i + 1, Smi::FromInt(new_age));
    }
  }
}

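// Performs one finalization round: re-marks the roots and object groups, ages
// and retains maps (first round only), and filters live weak cells. Marks
// finalization as completed once the progress per round drops below the
// configured threshold or the maximum number of rounds is reached.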
void IncrementalMarking::FinalizeIncrementally() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
  DCHECK(!finalize_marking_completed_);
  DCHECK(IsMarking());

  double start = heap_->MonotonicallyIncreasingTimeInMs();

  int old_marking_deque_top =
      heap_->mark_compact_collector()->marking_deque()->top();

  // After finishing incremental marking, we try to discover all unmarked
  // objects to reduce the marking load in the final pause.
  // 1) We scan and mark the roots again to find all changes to the root set.
  // 2) We mark the object groups.
  // 3) Age and retain maps embedded in optimized code.
  // 4) Remove weak cells with live values from the list of weak cells; they
  // do not need processing during GC.
  MarkRoots();
  if (!heap_->local_embedder_heap_tracer()->InUse()) {
    MarkObjectGroups();
  }
  if (incremental_marking_finalization_rounds_ == 0) {
    // Map retaining is needed for performance, not correctness,
    // so we can do it only once at the beginning of the finalization.
    RetainMaps();
  }
  ProcessWeakCells();

  int marking_progress =
      abs(old_marking_deque_top -
          heap_->mark_compact_collector()->marking_deque()->top());

  marking_progress += static_cast<int>(
      heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double delta = end - start;
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Finalize incrementally round %d, "
        "spent %d ms, marking progress %d.\n",
        incremental_marking_finalization_rounds_, static_cast<int>(delta),
        marking_progress);
  }

  ++incremental_marking_finalization_rounds_;
  if ((incremental_marking_finalization_rounds_ >=
       FLAG_max_incremental_marking_finalization_rounds) ||
      (marking_progress <
       FLAG_min_progress_during_incremental_marking_finalization)) {
    finalize_marking_completed_ = true;
  }

  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
      !black_allocation_) {
    // TODO(hpayer): Move to an earlier point as soon as we make faster marking
    // progress.
    StartBlackAllocation();
  }
}


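// After a scavenge, deque entries may point into from-space. Rewrite them to
// their forwarding addresses and drop entries for objects that died or whose
// targets are already black.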
void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
  if (!IsMarking()) return;

  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  int current = marking_deque->bottom();
  int mask = marking_deque->mask();
  int limit = marking_deque->top();
  HeapObject** array = marking_deque->array();
  int new_top = current;

  Map* filler_map = heap_->one_pointer_filler_map();

  while (current != limit) {
    HeapObject* obj = array[current];
    DCHECK(obj->IsHeapObject());
    current = ((current + 1) & mask);
    // Only pointers to from space have to be updated.
    if (heap_->InFromSpace(obj)) {
      MapWord map_word = obj->map_word();
      // There may be objects on the marking deque that do not exist anymore,
      // e.g. left trimmed objects or objects from the root set (frames).
      // If these objects are dead at scavenging time, their marking deque
      // entries will not point to forwarding addresses. Hence, we can discard
      // them.
      if (map_word.IsForwardingAddress()) {
        HeapObject* dest = map_word.ToForwardingAddress();
        if (ObjectMarking::IsBlack(dest)) continue;
        array[new_top] = dest;
        new_top = ((new_top + 1) & mask);
        DCHECK(new_top != marking_deque->bottom());
        DCHECK(ObjectMarking::IsGrey(obj) ||
               (obj->IsFiller() && ObjectMarking::IsWhite(obj)));
      }
    } else if (obj->map() != filler_map) {
      // Skip one word filler objects that appear on the
      // stack when we perform in place array shift.
      array[new_top] = obj;
      new_top = ((new_top + 1) & mask);
      DCHECK(new_top != marking_deque->bottom());
      DCHECK(ObjectMarking::IsGrey(obj) ||
             (obj->IsFiller() && ObjectMarking::IsWhite(obj)) ||
             (MemoryChunk::FromAddress(obj->address())
                  ->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
              ObjectMarking::IsBlack(obj)));
    }
  }
  marking_deque->set_top(new_top);
}


void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
  MarkGrey(heap_, map);

  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);

#if ENABLE_SLOW_DCHECKS
  MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj);
  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
               Marking::IsBlack(mark_bit)));
#endif
  MarkBlack(obj, size);
}

void IncrementalMarking::MarkGrey(Heap* heap, HeapObject* object) {
  if (ObjectMarking::IsWhite(object)) {
    heap->incremental_marking()->WhiteToGreyAndPush(object);
  }
}

void IncrementalMarking::MarkBlack(HeapObject* obj, int size) {
  if (ObjectMarking::IsBlack(obj)) return;
  ObjectMarking::GreyToBlack(obj);
}

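// Drains the marking deque, visiting objects until roughly |bytes_to_process|
// bytes have been scanned, or until the deque is empty when FORCE_COMPLETION
// is requested. Returns the number of bytes processed.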
intptr_t IncrementalMarking::ProcessMarkingDeque(
    intptr_t bytes_to_process, ForceCompletionAction completion) {
  intptr_t bytes_processed = 0;
  MarkingDeque* marking_deque =
      heap_->mark_compact_collector()->marking_deque();
  while (!marking_deque->IsEmpty() && (bytes_processed < bytes_to_process ||
                                       completion == FORCE_COMPLETION)) {
    HeapObject* obj = marking_deque->Pop();

    // Left trimming may result in white filler objects on the marking deque.
    // Ignore these objects.
    if (obj->IsFiller()) {
      DCHECK(ObjectMarking::IsImpossible(obj) || ObjectMarking::IsWhite(obj));
      continue;
    }

    Map* map = obj->map();
    int size = obj->SizeFromMap(map);
    unscanned_bytes_of_large_object_ = 0;
    VisitObject(map, obj, size);
    bytes_processed += size - unscanned_bytes_of_large_object_;
  }
  // Report all found wrappers to the embedder. This is necessary as the
  // embedder could potentially invalidate wrappers as soon as V8 is done
  // with its incremental marking processing. Any cached wrappers could
  // result in broken pointers at this point.
  heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
  return bytes_processed;
}


void IncrementalMarking::Hurry() {
  // A scavenge may have pushed new objects on the marking deque (due to black
  // allocation) even in COMPLETE state. This may happen if scavenges are
  // forced e.g. in tests. It should not happen when COMPLETE was set when
  // incremental marking finished and a regular GC was triggered after that
  // because should_hurry_ will force a full GC.
  if (!heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
    double start = 0.0;
    if (FLAG_trace_incremental_marking) {
      start = heap_->MonotonicallyIncreasingTimeInMs();
      heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
    }
    // TODO(gc) hurry can mark objects it encounters black as mutator
    // was stopped.
    ProcessMarkingDeque(0, FORCE_COMPLETION);
    state_ = COMPLETE;
    if (FLAG_trace_incremental_marking) {
      double end = heap_->MonotonicallyIncreasingTimeInMs();
      double delta = end - start;
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
          static_cast<int>(delta));
    }
  }

  Object* context = heap_->native_contexts_list();
  while (!context->IsUndefined(heap_->isolate())) {
    // GC can happen when the context is not fully initialized,
    // so the cache can be undefined.
    HeapObject* cache = HeapObject::cast(
        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
    if (!cache->IsUndefined(heap_->isolate())) {
      if (ObjectMarking::IsGrey(cache)) {
        ObjectMarking::GreyToBlack(cache);
      }
    }
    context = Context::cast(context)->next_context_link();
  }
}


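// Tears down incremental marking: removes the allocation observers, restores
// the RecordWrite stubs and page flags, and resets the state to STOPPED.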
void IncrementalMarking::Stop() {
  if (IsStopped()) return;
  if (FLAG_trace_incremental_marking) {
    int old_generation_size_mb =
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

  SpaceIterator it(heap_);
  while (it.has_next()) {
    Space* space = it.next();
    if (space == heap_->new_space()) {
      space->RemoveAllocationObserver(&new_generation_observer_);
    } else {
      space->RemoveAllocationObserver(&old_generation_observer_);
    }
  }

  IncrementalMarking::set_should_hurry(false);
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}


void IncrementalMarking::Finalize() {
  Hurry();
  Stop();
}


void IncrementalMarking::FinalizeMarking(CompletionAction action) {
  DCHECK(!finalize_marking_completed_);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] requesting finalization of incremental "
        "marking.\n");
  }
  request_type_ = FINALIZATION;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::MarkingComplete(CompletionAction action) {
  state_ = COMPLETE;
  // We will set the stack guard to request a GC now. This will mean the rest
  // of the GC gets performed as soon as possible (we can't do a GC here in a
  // record-write context). If a few things get allocated between now and
  // then, they shouldn't make us do a scavenge and keep being incremental;
  // so we set the should-hurry flag to indicate that there can't be much
  // work left to do.
  set_should_hurry(true);
  if (FLAG_trace_incremental_marking) {
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Complete (normal).\n");
  }
  request_type_ = COMPLETE_MARKING;
  if (action == GC_VIA_STACK_GUARD) {
    heap_->isolate()->stack_guard()->RequestGC();
  }
}


void IncrementalMarking::Epilogue() {
  was_activated_ = false;
  finalize_marking_completed_ = false;
  incremental_marking_finalization_rounds_ = 0;
}

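// Repeatedly performs marking steps, alternating between V8 marking and
// embedder wrapper tracing, until the deadline is reached, marking completes,
// or the marking deque runs empty. Returns the remaining time budget.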
double IncrementalMarking::AdvanceIncrementalMarking(
    double deadline_in_ms, CompletionAction completion_action,
    ForceCompletionAction force_completion, StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  DCHECK(!IsStopped());
  DCHECK_EQ(
      0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());

  double remaining_time_in_ms = 0.0;
  intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
      kStepSizeInMs,
      heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());

  const bool incremental_wrapper_tracing =
      state_ == MARKING && FLAG_incremental_marking_wrappers &&
      heap_->local_embedder_heap_tracer()->InUse();
  do {
    if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
      const double wrapper_deadline =
          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
      if (!heap_->local_embedder_heap_tracer()
               ->ShouldFinalizeIncrementalMarking()) {
        heap_->local_embedder_heap_tracer()->Trace(
            wrapper_deadline, EmbedderHeapTracer::AdvanceTracingActions(
                                  EmbedderHeapTracer::ForceCompletionAction::
                                      DO_NOT_FORCE_COMPLETION));
      }
    } else {
      Step(step_size_in_bytes, completion_action, force_completion,
           step_origin);
    }
    trace_wrappers_toggle_ = !trace_wrappers_toggle_;
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       !heap_->mark_compact_collector()->sweeper().AreSweeperTasksRunning())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  }
}

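// Accounts for old-generation allocation since the last step and returns the
// number of bytes marking still has to catch up with.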
size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
  // Update bytes_allocated_ based on the allocation counter.
  size_t current_counter = heap_->OldGenerationAllocationCounter();
  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
  old_generation_allocation_counter_ = current_counter;
  return bytes_allocated_;
}

size_t IncrementalMarking::StepSizeToMakeProgress() {
  // We increase step size gradually based on the time passed in order to
  // leave marking work to standalone tasks. The ramp up duration and the
  // target step count are chosen based on benchmarks.
  const int kRampUpIntervalMs = 300;
  const size_t kTargetStepCount = 128;
  const size_t kTargetStepCountAtOOM = 16;
  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;

  if (heap()->IsCloseToOutOfMemory(oom_slack)) {
    return heap()->PromotedSpaceSizeOfObjects() / kTargetStepCountAtOOM;
  }

  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
                         IncrementalMarking::kAllocatedThreshold);
  double time_passed_ms =
      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
  return static_cast<size_t>(factor * step_size);
}

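// Allocation-observer driven marking: combines the allocation-based and
// progress-based step sizes, consumes work already done ahead of schedule by
// background tasks, and otherwise performs a capped marking step.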
void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

  size_t bytes_to_process =
      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();

  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
    // The first step after Scavenge will see many allocated bytes.
    // Cap the step size to distribute the marking work more uniformly.
    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
        kMaxStepSizeInMs,
        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
    bytes_to_process = Min(bytes_to_process, max_step_size);

    size_t bytes_processed = 0;
    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
      // Steps performed in tasks have put us ahead of schedule.
      // We skip processing of the marking deque here and thus
      // shift marking time from inside V8 to standalone tasks.
      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
      bytes_processed = bytes_to_process;
    } else {
      HistogramTimerScope incremental_marking_scope(
          heap_->isolate()->counters()->gc_incremental_marking());
      TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
      TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
                             FORCE_COMPLETION, StepOrigin::kV8);
    }
    bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
  }
}

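// A single marking step: finalizes sweeping if necessary, processes up to
// bytes_to_process bytes from the marking deque, and requests finalization or
// completion once the deque is empty.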
size_t IncrementalMarking::Step(size_t bytes_to_process,
                                CompletionAction action,
                                ForceCompletionAction completion,
                                StepOrigin step_origin) {
  double start = heap_->MonotonicallyIncreasingTimeInMs();

  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    bytes_processed = ProcessMarkingDeque(bytes_to_process);
    if (step_origin == StepOrigin::kTask) {
      bytes_marked_ahead_of_schedule_ += bytes_processed;
    }

    if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
      if (heap_->local_embedder_heap_tracer()
              ->ShouldFinalizeIncrementalMarking()) {
        if (completion == FORCE_COMPLETION ||
            IsIdleMarkingDelayCounterLimitReached()) {
          if (!finalize_marking_completed_) {
            FinalizeMarking(action);
          } else {
            MarkingComplete(action);
          }
        } else {
          IncrementIdleMarkingDelayCounter();
        }
      } else {
        heap_->local_embedder_heap_tracer()->NotifyV8MarkingDequeWasEmpty();
      }
    }
  }

  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
        bytes_to_process, duration);
  }
  return bytes_processed;
}


bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8