1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "src/heap/incremental-marking.h"
6
7 #include "src/code-stubs.h"
8 #include "src/compilation-cache.h"
9 #include "src/conversions.h"
10 #include "src/heap/concurrent-marking.h"
11 #include "src/heap/gc-idle-time-handler.h"
12 #include "src/heap/gc-tracer.h"
13 #include "src/heap/heap-inl.h"
14 #include "src/heap/incremental-marking-inl.h"
15 #include "src/heap/mark-compact-inl.h"
16 #include "src/heap/object-stats.h"
17 #include "src/heap/objects-visiting-inl.h"
18 #include "src/heap/objects-visiting.h"
19 #include "src/heap/sweeper.h"
20 #include "src/objects/hash-table-inl.h"
21 #include "src/tracing/trace-event.h"
22 #include "src/v8.h"
23 #include "src/visitors.h"
24 #include "src/vm-state-inl.h"
25
26 namespace v8 {
27 namespace internal {
28
29 using IncrementalMarkingMarkingVisitor =
30 MarkingVisitor<FixedArrayVisitationMode::kIncremental,
31 TraceRetainingPathMode::kDisabled,
32 IncrementalMarking::MarkingState>;
33
34 void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
35 size_t size) {
36 Heap* heap = incremental_marking_.heap();
37 VMState<GC> state(heap->isolate());
38 RuntimeCallTimerScope runtime_timer(
39 heap->isolate(),
40 RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
41 incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
42 if (incremental_marking_.black_allocation() && addr != kNullAddress) {
43 // AdvanceIncrementalMarkingOnAllocation can start black allocation.
44 // Ensure that the new object is marked black.
45 HeapObject* object = HeapObject::FromAddress(addr);
46 if (incremental_marking_.marking_state()->IsWhite(object) &&
47 !(Heap::InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
48 if (heap->lo_space()->Contains(object)) {
49 incremental_marking_.marking_state()->WhiteToBlack(object);
50 } else {
51 Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
52 }
53 }
54 }
55 }
56
57 IncrementalMarking::IncrementalMarking(
58 Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
59 WeakObjects* weak_objects)
60 : heap_(heap),
61 marking_worklist_(marking_worklist),
62 weak_objects_(weak_objects),
63 initial_old_generation_size_(0),
64 bytes_marked_ahead_of_schedule_(0),
65 bytes_marked_concurrently_(0),
66 unscanned_bytes_of_large_object_(0),
67 is_compacting_(false),
68 should_hurry_(false),
69 was_activated_(false),
70 black_allocation_(false),
71 finalize_marking_completed_(false),
72 trace_wrappers_toggle_(false),
73 request_type_(NONE),
74 new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
75 old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
76 DCHECK_NOT_NULL(marking_worklist_);
77 SetState(STOPPED);
78 }
79
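// Marks |value| grey if it is still white and returns whether the caller needs
// to record the slot, which is only the case while compacting.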
80 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
81 HeapObject* value_heap_obj = HeapObject::cast(value);
82 DCHECK(!marking_state()->IsImpossible(value_heap_obj));
83 DCHECK(!marking_state()->IsImpossible(obj));
84 #ifdef V8_CONCURRENT_MARKING
85 // The write barrier stub generated with V8_CONCURRENT_MARKING does not
86 // check the color of the source object.
87 const bool need_recording = true;
88 #else
89 const bool need_recording = marking_state()->IsBlack(obj);
90 #endif
91
92 if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
93 RestartIfNotMarking();
94 }
95 return is_compacting_ && need_recording;
96 }
97
98 void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
99 HeapObjectReference** slot,
100 Object* value) {
101 if (BaseRecordWrite(obj, value) && slot != nullptr) {
102 // Object is not going to be rescanned. We need to record the slot.
103 heap_->mark_compact_collector()->RecordSlot(obj, slot,
104 HeapObject::cast(value));
105 }
106 }
107
108 int IncrementalMarking::RecordWriteFromCode(HeapObject* obj, MaybeObject** slot,
109 Isolate* isolate) {
110 DCHECK(obj->IsHeapObject());
111 isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
112 *slot);
113 // Called by RecordWriteCodeStubAssembler, which doesn't accept void type.
114 return 0;
115 }
116
117 void IncrementalMarking::RecordWriteIntoCode(Code* host, RelocInfo* rinfo,
118 HeapObject* value) {
119 DCHECK(IsMarking());
120 if (BaseRecordWrite(host, value)) {
121 // Object is not going to be rescanned. We need to record the slot.
122 heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
123 }
124 }
125
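// Transitions |obj| from white to grey and pushes it onto the marking
// worklist; returns false if the object was already grey or black.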
126 bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
127 if (marking_state()->WhiteToGrey(obj)) {
128 marking_worklist()->Push(obj);
129 return true;
130 }
131 return false;
132 }
133
134 void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
135 // Marking a left-trimmable fixed array black is unsafe because left-trimming
136 // re-pushes only grey arrays onto the marking worklist.
137 DCHECK(!obj->IsFixedArrayBase());
138 // Color the object black and push it into the bailout deque.
139 marking_state()->WhiteToGrey(obj);
140 if (marking_state()->GreyToBlack(obj)) {
141 if (FLAG_concurrent_marking) {
142 marking_worklist()->PushBailout(obj);
143 } else {
144 marking_worklist()->Push(obj);
145 }
146 }
147 }
148
149 void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
150 DCHECK(IsMarking());
151 DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
152 DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
153 MemoryChunk::FromAddress(to->address()));
154 DCHECK_NE(from, to);
155
156 MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
157 MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
158
159 if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
160 // Nothing to do if the object is in a black area.
161 return;
162 }
163
164 bool marked_black_due_to_left_trimming = false;
165 if (FLAG_concurrent_marking) {
166 // We need to mark the array black before overwriting its map and length
167 // so that the concurrent marker does not observe inconsistent state.
168 Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
169 if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
170 // The concurrent marker will not mark the array. We need to push the
171 // new array start onto the marking deque to ensure that it will be marked.
172 marked_black_due_to_left_trimming = true;
173 }
174 DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
175 }
176
177 if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
178 !marked_black_due_to_left_trimming) {
179 // The array was black before left trimming or was marked black by the
180 // concurrent marker. Simply transfer the color.
181 if (from->address() + kPointerSize == to->address()) {
182 // The old and the new markbits overlap. The |to| object has the
183 // grey color. To make it black, we need to set the second bit.
184 DCHECK(new_mark_bit.Get<kAtomicity>());
185 new_mark_bit.Next().Set<kAtomicity>();
186 } else {
187 bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
188 DCHECK(success);
189 USE(success);
190 }
191 } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
192 marked_black_due_to_left_trimming) {
193 // The array was already grey or was marked black by this function.
194 // Mark the new array grey and push it onto the marking deque.
195 if (from->address() + kPointerSize == to->address()) {
196 // The old and the new markbits overlap. The |to| object is either white
197 // or grey. Set the first bit to make sure that it is grey.
198 new_mark_bit.Set<kAtomicity>();
199 DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
200 } else {
201 bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
202 DCHECK(success);
203 USE(success);
204 }
205 // Subsequent left-trimming will re-push only grey arrays.
206 // Ensure that this array is grey.
207 DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
208 marking_worklist()->PushBailout(to);
209 RestartIfNotMarking();
210 }
211 }
212
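// Root visitor that greys every strong root so the incremental marker will
// visit it later.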
213 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
214 public:
215 explicit IncrementalMarkingRootMarkingVisitor(
216 IncrementalMarking* incremental_marking)
217 : heap_(incremental_marking->heap()) {}
218
219 void VisitRootPointer(Root root, const char* description,
220 Object** p) override {
221 MarkObjectByPointer(p);
222 }
223
224 void VisitRootPointers(Root root, const char* description, Object** start,
225 Object** end) override {
226 for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
227 }
228
229 private:
230 void MarkObjectByPointer(Object** p) {
231 Object* obj = *p;
232 if (!obj->IsHeapObject()) return;
233
234 heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
235 }
236
237 Heap* heap_;
238 };
239
240 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
241 PagedSpace* space) {
242 for (Page* p : *space) {
243 p->SetOldGenerationPageFlags(false);
244 }
245 }
246
247
248 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
249 NewSpace* space) {
250 for (Page* p : *space) {
251 p->SetYoungGenerationPageFlags(false);
252 }
253 }
254
255
256 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
257 DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
258 DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
259 DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
260 DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
261
262 for (LargePage* p : *heap_->lo_space()) {
263 p->SetOldGenerationPageFlags(false);
264 }
265 }
266
267
268 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
269 for (Page* p : *space) {
270 p->SetOldGenerationPageFlags(true);
271 }
272 }
273
274
275 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
276 for (Page* p : *space) {
277 p->SetYoungGenerationPageFlags(true);
278 }
279 }
280
281
282 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
283 ActivateIncrementalWriteBarrier(heap_->old_space());
284 ActivateIncrementalWriteBarrier(heap_->map_space());
285 ActivateIncrementalWriteBarrier(heap_->code_space());
286 ActivateIncrementalWriteBarrier(heap_->new_space());
287
288 for (LargePage* p : *heap_->lo_space()) {
289 p->SetOldGenerationPageFlags(true);
290 }
291 }
292
293
294 bool IncrementalMarking::WasActivated() { return was_activated_; }
295
296
297 bool IncrementalMarking::CanBeActivated() {
298 // Only start incremental marking in a safe state: 1) when incremental
299 // marking is turned on, 2) when we are currently not in a GC, and
300 // 3) when we are currently not serializing or deserializing the heap.
301 return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
302 heap_->deserialization_complete() &&
303 !heap_->isolate()->serializer_enabled();
304 }
305
306
307 void IncrementalMarking::Deactivate() {
308 DeactivateIncrementalWriteBarrier();
309 }
310
311 void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
312 if (FLAG_trace_incremental_marking) {
313 int old_generation_size_mb =
314 static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
315 int old_generation_limit_mb =
316 static_cast<int>(heap()->old_generation_allocation_limit() / MB);
317 heap()->isolate()->PrintWithTimestamp(
318 "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
319 "slack %dMB\n",
320 Heap::GarbageCollectionReasonToString(gc_reason),
321 old_generation_size_mb, old_generation_limit_mb,
322 Max(0, old_generation_limit_mb - old_generation_size_mb));
323 }
324 DCHECK(FLAG_incremental_marking);
325 DCHECK(state_ == STOPPED);
326 DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
327 DCHECK(!heap_->isolate()->serializer_enabled());
328
329 Counters* counters = heap_->isolate()->counters();
330
331 counters->incremental_marking_reason()->AddSample(
332 static_cast<int>(gc_reason));
333 HistogramTimerScope incremental_marking_scope(
334 counters->gc_incremental_marking_start());
335 TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
336 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
337 heap_->tracer()->NotifyIncrementalMarkingStart();
338
339 start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
340 initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
341 old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
342 bytes_allocated_ = 0;
343 bytes_marked_ahead_of_schedule_ = 0;
344 bytes_marked_concurrently_ = 0;
345 should_hurry_ = false;
346 was_activated_ = true;
347
348 if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
349 StartMarking();
350 } else {
351 if (FLAG_trace_incremental_marking) {
352 heap()->isolate()->PrintWithTimestamp(
353 "[IncrementalMarking] Start sweeping.\n");
354 }
355 SetState(SWEEPING);
356 }
357
358 heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
359 &new_generation_observer_);
360 incremental_marking_job()->Start(heap_);
361 }
362
363
364 void IncrementalMarking::StartMarking() {
365 if (heap_->isolate()->serializer_enabled()) {
366 // Black allocation currently starts when we start incremental marking,
367 // but we cannot enable black allocation while deserializing. Hence, we
368 // have to delay the start of incremental marking in that case.
369 if (FLAG_trace_incremental_marking) {
370 heap()->isolate()->PrintWithTimestamp(
371 "[IncrementalMarking] Start delayed - serializer\n");
372 }
373 return;
374 }
375 if (FLAG_trace_incremental_marking) {
376 heap()->isolate()->PrintWithTimestamp(
377 "[IncrementalMarking] Start marking\n");
378 }
379
380 is_compacting_ =
381 !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
382
383 SetState(MARKING);
384
385 {
386 TRACE_GC(heap()->tracer(),
387 GCTracer::Scope::MC_INCREMENTAL_WRAPPER_PROLOGUE);
388 heap_->local_embedder_heap_tracer()->TracePrologue();
389 }
390
391 ActivateIncrementalWriteBarrier();
392
393 // Marking bits are cleared by the sweeper.
394 #ifdef VERIFY_HEAP
395 if (FLAG_verify_heap) {
396 heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
397 }
398 #endif
399
400 heap_->isolate()->compilation_cache()->MarkCompactPrologue();
401
402 #ifdef V8_CONCURRENT_MARKING
403 // The write-barrier does not check the color of the source object.
404 // Start black allocation earlier to ensure faster marking progress.
405 if (!black_allocation_) {
406 StartBlackAllocation();
407 }
408 #endif
409
410 // Mark strong roots grey.
411 IncrementalMarkingRootMarkingVisitor visitor(this);
412 heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
413
414 if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
415 heap_->concurrent_marking()->ScheduleTasks();
416 }
417
418 // Ready to start incremental marking.
419 if (FLAG_trace_incremental_marking) {
420 heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
421 }
422 }
423
424 void IncrementalMarking::StartBlackAllocation() {
425 DCHECK(FLAG_black_allocation);
426 DCHECK(!black_allocation_);
427 DCHECK(IsMarking());
428 black_allocation_ = true;
429 heap()->old_space()->MarkLinearAllocationAreaBlack();
430 heap()->map_space()->MarkLinearAllocationAreaBlack();
431 heap()->code_space()->MarkLinearAllocationAreaBlack();
432 if (FLAG_trace_incremental_marking) {
433 heap()->isolate()->PrintWithTimestamp(
434 "[IncrementalMarking] Black allocation started\n");
435 }
436 }
437
438 void IncrementalMarking::PauseBlackAllocation() {
439 DCHECK(FLAG_black_allocation);
440 DCHECK(IsMarking());
441 heap()->old_space()->UnmarkLinearAllocationArea();
442 heap()->map_space()->UnmarkLinearAllocationArea();
443 heap()->code_space()->UnmarkLinearAllocationArea();
444 if (FLAG_trace_incremental_marking) {
445 heap()->isolate()->PrintWithTimestamp(
446 "[IncrementalMarking] Black allocation paused\n");
447 }
448 black_allocation_ = false;
449 }
450
451 void IncrementalMarking::FinishBlackAllocation() {
452 if (black_allocation_) {
453 black_allocation_ = false;
454 if (FLAG_trace_incremental_marking) {
455 heap()->isolate()->PrintWithTimestamp(
456 "[IncrementalMarking] Black allocation finished\n");
457 }
458 }
459 }
460
461 void IncrementalMarking::AbortBlackAllocation() {
462 if (FLAG_trace_incremental_marking) {
463 heap()->isolate()->PrintWithTimestamp(
464 "[IncrementalMarking] Black allocation aborted\n");
465 }
466 }
467
468 void IncrementalMarking::MarkRoots() {
469 DCHECK(!finalize_marking_completed_);
470 DCHECK(IsMarking());
471
472 IncrementalMarkingRootMarkingVisitor visitor(this);
473 heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
474 }
475
476 bool IncrementalMarking::ShouldRetainMap(Map* map, int age) {
477 if (age == 0) {
478 // The map has aged. Do not retain this map.
479 return false;
480 }
481 Object* constructor = map->GetConstructor();
482 if (!constructor->IsHeapObject() ||
483 marking_state()->IsWhite(HeapObject::cast(constructor))) {
484 // The constructor is dead, so no new objects with this map can
485 // be created. Do not retain this map.
486 return false;
487 }
488 return true;
489 }
490
491
492 void IncrementalMarking::RetainMaps() {
493 // Do not retain dead maps if the flag disables it, or if there is
494 // - memory pressure (reduce_memory_footprint_), or
495 // - a GC requested by tests or dev-tools (abort_incremental_marking_).
496 bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
497 heap()->ShouldAbortIncrementalMarking() ||
498 FLAG_retain_maps_for_n_gc == 0;
499 WeakArrayList* retained_maps = heap()->retained_maps();
500 int length = retained_maps->length();
501 // The number_of_disposed_maps separates maps in the retained_maps
502 // array that were created before and after context disposal.
503 // We do not age and retain disposed maps to avoid memory leaks.
504 int number_of_disposed_maps = heap()->number_of_disposed_maps_;
505 for (int i = 0; i < length; i += 2) {
506 MaybeObject* value = retained_maps->Get(i);
507 HeapObject* map_heap_object;
508 if (!value->ToWeakHeapObject(&map_heap_object)) {
509 continue;
510 }
511 int age = Smi::ToInt(retained_maps->Get(i + 1)->ToSmi());
512 int new_age;
513 Map* map = Map::cast(map_heap_object);
514 if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
515 marking_state()->IsWhite(map)) {
516 if (ShouldRetainMap(map, age)) {
517 WhiteToGreyAndPush(map);
518 }
519 Object* prototype = map->prototype();
520 if (age > 0 && prototype->IsHeapObject() &&
521 marking_state()->IsWhite(HeapObject::cast(prototype))) {
522 // The prototype is not marked, age the map.
523 new_age = age - 1;
524 } else {
525 // The prototype and the constructor are marked; this map keeps only
526 // the transition tree alive, not JSObjects. Do not age the map.
527 new_age = age;
528 }
529 } else {
530 new_age = FLAG_retain_maps_for_n_gc;
531 }
532 // Compact the array and update the age.
533 if (new_age != age) {
534 retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
535 }
536 }
537 }
538
539 void IncrementalMarking::FinalizeIncrementally() {
540 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
541 DCHECK(!finalize_marking_completed_);
542 DCHECK(IsMarking());
543
544 double start = heap_->MonotonicallyIncreasingTimeInMs();
545
546 // After finishing incremental marking, we try to discover all unmarked
547 // objects to reduce the marking load in the final pause.
548 // 1) We scan and mark the roots again to find all changes to the root set.
549 // 2) Age and retain maps embedded in optimized code.
550 MarkRoots();
551
552 // Map retaining is needed for performance, not correctness,
553 // so we can do it only once at the beginning of the finalization.
554 RetainMaps();
555
556 finalize_marking_completed_ = true;
557
558 if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
559 !black_allocation_) {
560 // TODO(hpayer): Move to an earlier point as soon as we make faster marking
561 // progress.
562 StartBlackAllocation();
563 }
564
565 if (FLAG_trace_incremental_marking) {
566 double end = heap_->MonotonicallyIncreasingTimeInMs();
567 double delta = end - start;
568 heap()->isolate()->PrintWithTimestamp(
569 "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
570 }
571 }
572
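// After a scavenge, worklist entries pointing into from-space are replaced by
// their forwarding addresses and entries for dead objects are dropped.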
573 void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
574 if (!IsMarking()) return;
575
576 Map* filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
577
578 #ifdef ENABLE_MINOR_MC
579 MinorMarkCompactCollector::MarkingState* minor_marking_state =
580 heap()->minor_mark_compact_collector()->marking_state();
581 #else
582 void* minor_marking_state = nullptr;
583 #endif // ENABLE_MINOR_MC
584
585 marking_worklist()->Update([this, filler_map, minor_marking_state](
586 HeapObject* obj, HeapObject** out) -> bool {
587 DCHECK(obj->IsHeapObject());
588 // Only pointers to from space have to be updated.
589 if (Heap::InFromSpace(obj)) {
590 MapWord map_word = obj->map_word();
591 if (!map_word.IsForwardingAddress()) {
592 // There may be objects on the marking deque that do not exist anymore,
593 // e.g. left-trimmed objects or objects from the root set (frames).
594 // If these objects are dead at scavenging time, their marking deque
595 // entries will not point to forwarding addresses. Hence, we can discard
596 // them.
597 return false;
598 }
599 HeapObject* dest = map_word.ToForwardingAddress();
600 DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
601 *out = dest;
602 return true;
603 } else if (Heap::InToSpace(obj)) {
604 // The object may be on a page that was moved in new space.
605 DCHECK(
606 Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
607 #ifdef ENABLE_MINOR_MC
608 if (minor_marking_state->IsGrey(obj)) {
609 *out = obj;
610 return true;
611 }
612 #endif // ENABLE_MINOR_MC
613 return false;
614 } else {
615 // The object may be on a page that was moved from new to old space. Only
616 // applicable during minor MC garbage collections.
617 if (Page::FromAddress(obj->address())
618 ->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
619 #ifdef ENABLE_MINOR_MC
620 if (minor_marking_state->IsGrey(obj)) {
621 *out = obj;
622 return true;
623 }
624 #endif // ENABLE_MINOR_MC
625 return false;
626 }
627 DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
628 // Skip one-word filler objects that appear on the
629 // stack when we perform an in-place array shift.
630 if (obj->map() != filler_map) {
631 *out = obj;
632 return true;
633 }
634 return false;
635 }
636 });
637
638 UpdateWeakReferencesAfterScavenge();
639 }
640
641 namespace {
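// Returns the forwarding address of |heap_obj| if it was evacuated, nullptr if
// it died in new space, and the object itself otherwise.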
642 template <typename T>
643 T* ForwardingAddress(T* heap_obj) {
644 MapWord map_word = heap_obj->map_word();
645
646 if (map_word.IsForwardingAddress()) {
647 return T::cast(map_word.ToForwardingAddress());
648 } else if (Heap::InNewSpace(heap_obj)) {
649 return nullptr;
650 } else {
651 return heap_obj;
652 }
653 }
654 } // namespace
655
656 void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
657 weak_objects_->weak_references.Update(
658 [](std::pair<HeapObject*, HeapObjectReference**> slot_in,
659 std::pair<HeapObject*, HeapObjectReference**>* slot_out) -> bool {
660 HeapObject* heap_obj = slot_in.first;
661 HeapObject* forwarded = ForwardingAddress(heap_obj);
662
663 if (forwarded) {
664 ptrdiff_t distance_to_slot =
665 reinterpret_cast<Address>(slot_in.second) -
666 reinterpret_cast<Address>(slot_in.first);
667 Address new_slot =
668 reinterpret_cast<Address>(forwarded) + distance_to_slot;
669 slot_out->first = forwarded;
670 slot_out->second = reinterpret_cast<HeapObjectReference**>(new_slot);
671 return true;
672 }
673
674 return false;
675 });
676 weak_objects_->weak_objects_in_code.Update(
677 [](std::pair<HeapObject*, Code*> slot_in,
678 std::pair<HeapObject*, Code*>* slot_out) -> bool {
679 HeapObject* heap_obj = slot_in.first;
680 HeapObject* forwarded = ForwardingAddress(heap_obj);
681
682 if (forwarded) {
683 slot_out->first = forwarded;
684 slot_out->second = slot_in.second;
685 return true;
686 }
687
688 return false;
689 });
690 weak_objects_->ephemeron_hash_tables.Update(
691 [](EphemeronHashTable* slot_in, EphemeronHashTable** slot_out) -> bool {
692 EphemeronHashTable* forwarded = ForwardingAddress(slot_in);
693
694 if (forwarded) {
695 *slot_out = forwarded;
696 return true;
697 }
698
699 return false;
700 });
701
702 auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
703 HeapObject* key = slot_in.key;
704 HeapObject* value = slot_in.value;
705 HeapObject* forwarded_key = ForwardingAddress(key);
706 HeapObject* forwarded_value = ForwardingAddress(value);
707
708 if (forwarded_key && forwarded_value) {
709 *slot_out = Ephemeron{forwarded_key, forwarded_value};
710 return true;
711 }
712
713 return false;
714 };
715
716 weak_objects_->current_ephemerons.Update(ephemeron_updater);
717 weak_objects_->next_ephemerons.Update(ephemeron_updater);
718 weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
719 }
720
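// Discounts new-space bytes that died during the scavenge from the
// marked-ahead-of-schedule counter.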
721 void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
722 size_t dead_bytes_in_new_space) {
723 if (!IsMarking()) return;
724 bytes_marked_ahead_of_schedule_ -=
725 Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
726 }
727
728 bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
729 if (!obj->IsFixedArray()) return false;
730 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
731 return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
732 }
733
734 int IncrementalMarking::VisitObject(Map* map, HeapObject* obj) {
735 DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
736 if (!marking_state()->GreyToBlack(obj)) {
737 // The object can already be black in these cases:
738 // 1. The object is a fixed array with the progress bar.
739 // 2. The object is a JSObject that was colored black before
740 // unsafe layout change.
741 // 3. The object is a string that was colored black before
742 // unsafe layout change.
743 // 4. The object is materialized by the deoptimizer.
744 DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
745 obj->IsFixedArray() || obj->IsJSObject() || obj->IsString());
746 }
747 DCHECK(marking_state()->IsBlack(obj));
748 WhiteToGreyAndPush(map);
749 IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
750 marking_state());
751 return visitor.Visit(map, obj);
752 }
753
754 void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
755 if (IsMarking() && marking_state()->IsBlack(obj)) {
756 RevisitObject(obj);
757 }
758 }
759
760 void IncrementalMarking::RevisitObject(HeapObject* obj) {
761 DCHECK(IsMarking());
762 DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
763 Page* page = Page::FromAddress(obj->address());
764 if (page->owner()->identity() == LO_SPACE) {
765 page->ResetProgressBar();
766 }
767 Map* map = obj->map();
768 WhiteToGreyAndPush(map);
769 IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
770 marking_state());
771 visitor.Visit(map, obj);
772 }
773
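// Drains the selected marking worklist until |bytes_to_process| bytes have
// been visited or the worklist runs empty; FORCE_COMPLETION ignores the byte
// budget and drains it completely.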
774 template <WorklistToProcess worklist_to_process>
775 intptr_t IncrementalMarking::ProcessMarkingWorklist(
776 intptr_t bytes_to_process, ForceCompletionAction completion) {
777 intptr_t bytes_processed = 0;
778 while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
779 HeapObject* obj;
780 if (worklist_to_process == WorklistToProcess::kBailout) {
781 obj = marking_worklist()->PopBailout();
782 } else {
783 obj = marking_worklist()->Pop();
784 }
785 if (obj == nullptr) break;
786 // Left trimming may result in white, grey, or black filler objects on the
787 // marking deque. Ignore these objects.
788 if (obj->IsFiller()) {
789 DCHECK(!marking_state()->IsImpossible(obj));
790 continue;
791 }
792 unscanned_bytes_of_large_object_ = 0;
793 int size = VisitObject(obj->map(), obj);
794 bytes_processed += size - unscanned_bytes_of_large_object_;
795 }
796 // Report all found wrappers to the embedder. This is necessary as the
797 // embedder could potentially invalidate wrappers as soon as V8 is done
798 // with its incremental marking processing. Any cached wrappers could
799 // result in broken pointers at this point.
800 heap_->local_embedder_heap_tracer()->RegisterWrappersWithRemoteTracer();
801 return bytes_processed;
802 }
803
804
805 void IncrementalMarking::Hurry() {
806 // A scavenge may have pushed new objects on the marking deque (due to black
807 // allocation) even in COMPLETE state. This may happen if scavenges are
808 // forced e.g. in tests. It should not happen when COMPLETE was set when
809 // incremental marking finished and a regular GC was triggered after that
810 // because should_hurry_ will force a full GC.
811 if (!marking_worklist()->IsEmpty()) {
812 double start = 0.0;
813 if (FLAG_trace_incremental_marking) {
814 start = heap_->MonotonicallyIncreasingTimeInMs();
815 if (FLAG_trace_incremental_marking) {
816 heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
817 }
818 }
819 // TODO(gc) hurry can mark objects it encounters black as mutator
820 // was stopped.
821 ProcessMarkingWorklist(0, FORCE_COMPLETION);
822 SetState(COMPLETE);
823 if (FLAG_trace_incremental_marking) {
824 double end = heap_->MonotonicallyIncreasingTimeInMs();
825 double delta = end - start;
826 if (FLAG_trace_incremental_marking) {
827 heap()->isolate()->PrintWithTimestamp(
828 "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
829 static_cast<int>(delta));
830 }
831 }
832 }
833 }
834
835
836 void IncrementalMarking::Stop() {
837 if (IsStopped()) return;
838 if (FLAG_trace_incremental_marking) {
839 int old_generation_size_mb =
840 static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
841 int old_generation_limit_mb =
842 static_cast<int>(heap()->old_generation_allocation_limit() / MB);
843 heap()->isolate()->PrintWithTimestamp(
844 "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
845 "overshoot %dMB\n",
846 old_generation_size_mb, old_generation_limit_mb,
847 Max(0, old_generation_size_mb - old_generation_limit_mb));
848 }
849
850 SpaceIterator it(heap_);
851 while (it.has_next()) {
852 Space* space = it.next();
853 if (space == heap_->new_space()) {
854 space->RemoveAllocationObserver(&new_generation_observer_);
855 } else {
856 space->RemoveAllocationObserver(&old_generation_observer_);
857 }
858 }
859
860 IncrementalMarking::set_should_hurry(false);
861 heap_->isolate()->stack_guard()->ClearGC();
862 SetState(STOPPED);
863 is_compacting_ = false;
864 FinishBlackAllocation();
865 }
866
867
868 void IncrementalMarking::Finalize() {
869 Hurry();
870 Stop();
871 }
872
873
874 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
875 DCHECK(!finalize_marking_completed_);
876 if (FLAG_trace_incremental_marking) {
877 heap()->isolate()->PrintWithTimestamp(
878 "[IncrementalMarking] requesting finalization of incremental "
879 "marking.\n");
880 }
881 request_type_ = FINALIZATION;
882 if (action == GC_VIA_STACK_GUARD) {
883 heap_->isolate()->stack_guard()->RequestGC();
884 }
885 }
886
887
888 void IncrementalMarking::MarkingComplete(CompletionAction action) {
889 SetState(COMPLETE);
890 // We will set the stack guard to request a GC now. This will mean the rest
891 // of the GC gets performed as soon as possible (we can't do a GC here in a
892 // record-write context). If a few things get allocated between now and then,
893 // that shouldn't make us do a scavenge and keep being incremental, so we set
894 // the should-hurry flag to indicate that there can't be much work left to do.
895 set_should_hurry(true);
896 if (FLAG_trace_incremental_marking) {
897 heap()->isolate()->PrintWithTimestamp(
898 "[IncrementalMarking] Complete (normal).\n");
899 }
900 request_type_ = COMPLETE_MARKING;
901 if (action == GC_VIA_STACK_GUARD) {
902 heap_->isolate()->stack_guard()->RequestGC();
903 }
904 }
905
906
907 void IncrementalMarking::Epilogue() {
908 was_activated_ = false;
909 finalize_marking_completed_ = false;
910 }
911
912 double IncrementalMarking::AdvanceIncrementalMarking(
913 double deadline_in_ms, CompletionAction completion_action,
914 StepOrigin step_origin) {
915 HistogramTimerScope incremental_marking_scope(
916 heap_->isolate()->counters()->gc_incremental_marking());
917 TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
918 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
919 DCHECK(!IsStopped());
920 DCHECK_EQ(
921 0, heap_->local_embedder_heap_tracer()->NumberOfCachedWrappersToTrace());
922
923 double remaining_time_in_ms = 0.0;
924 intptr_t step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
925 kStepSizeInMs,
926 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
927
928 const bool incremental_wrapper_tracing =
929 state_ == MARKING && FLAG_incremental_marking_wrappers &&
930 heap_->local_embedder_heap_tracer()->InUse();
931 do {
932 if (incremental_wrapper_tracing && trace_wrappers_toggle_) {
933 TRACE_GC(heap()->tracer(),
934 GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
935 const double wrapper_deadline =
936 heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
937 if (!heap_->local_embedder_heap_tracer()
938 ->ShouldFinalizeIncrementalMarking()) {
939 heap_->local_embedder_heap_tracer()->Trace(wrapper_deadline);
940 }
941 } else {
942 Step(step_size_in_bytes, completion_action, step_origin);
943 }
944 trace_wrappers_toggle_ = !trace_wrappers_toggle_;
945 remaining_time_in_ms =
946 deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
947 } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
948 !marking_worklist()->IsEmpty());
949 return remaining_time_in_ms;
950 }
951
952
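// Completes sweeping unless concurrent sweeper tasks are still running, and
// starts marking as soon as sweeping is no longer in progress.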
953 void IncrementalMarking::FinalizeSweeping() {
954 DCHECK(state_ == SWEEPING);
955 if (heap_->mark_compact_collector()->sweeping_in_progress() &&
956 (!FLAG_concurrent_sweeping ||
957 !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
958 heap_->mark_compact_collector()->EnsureSweepingCompleted();
959 }
960 if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
961 #ifdef DEBUG
962 heap_->VerifyCountersAfterSweeping();
963 #endif
964 StartMarking();
965 }
966 }
967
968 size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
969 // Update bytes_allocated_ based on the allocation counter.
970 size_t current_counter = heap_->OldGenerationAllocationCounter();
971 bytes_allocated_ += current_counter - old_generation_allocation_counter_;
972 old_generation_allocation_counter_ = current_counter;
973 return bytes_allocated_;
974 }
975
976 size_t IncrementalMarking::StepSizeToMakeProgress() {
977 // We increase step size gradually based on the time passed in order to
978 // leave marking work to standalone tasks. The ramp up duration and the
979 // target step count are chosen based on benchmarks.
980 const int kRampUpIntervalMs = 300;
981 const size_t kTargetStepCount = 256;
982 const size_t kTargetStepCountAtOOM = 32;
983 size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
984
985 if (!heap()->CanExpandOldGeneration(oom_slack)) {
986 return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
987 }
988
989 size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
990 IncrementalMarking::kMinStepSizeInBytes);
991 double time_passed_ms =
992 heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
993 double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
994 return static_cast<size_t>(factor * step_size);
995 }
996
997 void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
998 // Code using an AlwaysAllocateScope assumes that the GC state does not
999 // change; that implies that no marking steps must be performed.
1000 if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
1001 (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
1002 return;
1003 }
1004
1005 size_t bytes_to_process =
1006 StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
1007
1008 if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
1009 HistogramTimerScope incremental_marking_scope(
1010 heap_->isolate()->counters()->gc_incremental_marking());
1011 TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
1012 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
1013 // The first step after Scavenge will see many allocated bytes.
1014 // Cap the step size to distribute the marking work more uniformly.
1015 size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
1016 kMaxStepSizeInMs,
1017 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
1018 bytes_to_process = Min(bytes_to_process, max_step_size);
1019 size_t bytes_processed = 0;
1020 if (FLAG_concurrent_marking) {
1021 bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
1022 StepOrigin::kV8, WorklistToProcess::kBailout);
1023 bytes_to_process = (bytes_processed >= bytes_to_process)
1024 ? 0
1025 : bytes_to_process - bytes_processed;
1026 size_t current_bytes_marked_concurrently =
1027 heap()->concurrent_marking()->TotalMarkedBytes();
1028 // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
1029 // short period of time when a concurrent marking task is finishing.
1030 if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
1031 bytes_marked_ahead_of_schedule_ +=
1032 current_bytes_marked_concurrently - bytes_marked_concurrently_;
1033 bytes_marked_concurrently_ = current_bytes_marked_concurrently;
1034 }
1035 }
1036 if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
1037 // Steps performed in tasks and concurrently have put us ahead of
1038 // schedule. We skip processing of the marking deque here and thus shift
1039 // marking time from inside V8 to standalone tasks.
1040 bytes_marked_ahead_of_schedule_ -= bytes_to_process;
1041 bytes_processed += bytes_to_process;
1042 bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
1043 }
1044 bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
1045 StepOrigin::kV8, WorklistToProcess::kAll);
1046 bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
1047 }
1048 }
1049
1050 size_t IncrementalMarking::Step(size_t bytes_to_process,
1051 CompletionAction action, StepOrigin step_origin,
1052 WorklistToProcess worklist_to_process) {
1053 double start = heap_->MonotonicallyIncreasingTimeInMs();
1054
1055 if (state_ == SWEEPING) {
1056 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
1057 FinalizeSweeping();
1058 }
1059
1060 size_t bytes_processed = 0;
1061 if (state_ == MARKING) {
1062 if (FLAG_concurrent_marking) {
1063 heap_->new_space()->ResetOriginalTop();
1064 // It is safe to merge back all objects that were on hold into the shared
1065 // work list at Step because we are at a safepoint where all objects
1066 // are properly initialized.
1067 marking_worklist()->shared()->MergeGlobalPool(
1068 marking_worklist()->on_hold());
1069 }
1070
1071 // Only print marking worklist in debug mode to save ~40KB of code size.
1072 #ifdef DEBUG
1073 if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
1074 FLAG_trace_gc_verbose) {
1075 marking_worklist()->Print();
1076 }
1077 #endif
1078
1079 if (worklist_to_process == WorklistToProcess::kBailout) {
1080 bytes_processed =
1081 ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
1082 } else {
1083 bytes_processed =
1084 ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
1085 }
1086
1087 if (step_origin == StepOrigin::kTask) {
1088 bytes_marked_ahead_of_schedule_ += bytes_processed;
1089 }
1090
1091 if (marking_worklist()->IsEmpty()) {
1092 if (heap_->local_embedder_heap_tracer()
1093 ->ShouldFinalizeIncrementalMarking()) {
1094 if (!finalize_marking_completed_) {
1095 FinalizeMarking(action);
1096 } else {
1097 MarkingComplete(action);
1098 }
1099 } else {
1100 heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
1101 }
1102 }
1103 }
1104 if (FLAG_concurrent_marking) {
1105 heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1106 }
1107
1108 double end = heap_->MonotonicallyIncreasingTimeInMs();
1109 double duration = (end - start);
1110 // Note that we report zero bytes here when sweeping was in progress or
1111 // when we just started incremental marking. In these cases we did not
1112 // process the marking deque.
1113 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
1114 if (FLAG_trace_incremental_marking) {
1115 heap_->isolate()->PrintWithTimestamp(
1116 "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
1117 step_origin == StepOrigin::kV8 ? "in v8" : "in task",
1118 bytes_processed / KB, bytes_to_process / KB, duration);
1119 }
1120 if (FLAG_trace_concurrent_marking) {
1121 heap_->isolate()->PrintWithTimestamp(
1122 "Concurrently marked %" PRIuS "KB\n",
1123 heap_->concurrent_marking()->TotalMarkedBytes() / KB);
1124 }
1125 return bytes_processed;
1126 }
1127
1128 } // namespace internal
1129 } // namespace v8
1130