// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_GC_TRACER_H_
#define V8_HEAP_GC_TRACER_H_

#include "include/v8-metrics.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/optional.h"
#include "src/base/ring-buffer.h"
#include "src/common/globals.h"
#include "src/heap/heap.h"
#include "src/init/heap-symbols.h"
#include "src/logging/counters.h"
#include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck

namespace v8 {
namespace internal {

using BytesAndDuration = std::pair<uint64_t, double>;

inline BytesAndDuration MakeBytesAndDuration(uint64_t bytes, double duration) {
  return std::make_pair(bytes, duration);
}
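
// Example (illustrative values): a speed sample of 100 KB processed in 2.5 ms
// would be recorded as MakeBytesAndDuration(100 * KB, 2.5) and fed into the
// ring buffers used for the speed computations below.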

enum ScavengeSpeedMode { kForAllObjects, kForSurvivedObjects };

#define TRACE_GC_CATEGORIES \
  "devtools.timeline," TRACE_DISABLED_BY_DEFAULT("v8.gc")

#define TRACE_GC(tracer, scope_id)                    \
  GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
      tracer, GCTracer::Scope::ScopeId(scope_id), ThreadKind::kMain); \
  TRACE_EVENT0(TRACE_GC_CATEGORIES,                   \
               GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))

#define TRACE_GC1(tracer, scope_id, thread_kind)      \
  GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
      tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
  TRACE_EVENT0(TRACE_GC_CATEGORIES,                   \
               GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)))

#define TRACE_GC_EPOCH(tracer, scope_id, thread_kind) \
  GCTracer::Scope UNIQUE_IDENTIFIER(gc_tracer_scope)( \
      tracer, GCTracer::Scope::ScopeId(scope_id), thread_kind); \
  TRACE_EVENT1(TRACE_GC_CATEGORIES,                   \
               GCTracer::Scope::Name(GCTracer::Scope::ScopeId(scope_id)), \
               "epoch", tracer->CurrentEpoch(scope_id))

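// Illustrative usage of the macros above (scope ids picked for the example):
//
//   TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
//   TRACE_GC1(heap()->tracer(), GCTracer::Scope::MC_BACKGROUND_MARKING,
//             ThreadKind::kBackground);
//
// Each expansion times the enclosing C++ scope and emits a trace event named
// after the scope.
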
using CollectionEpoch = uint32_t;

// GCTracer collects and prints ONE line after each garbage collector
// invocation IFF --trace_gc is used.
class V8_EXPORT_PRIVATE GCTracer {
 public:
  GCTracer(const GCTracer&) = delete;
  GCTracer& operator=(const GCTracer&) = delete;

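  // A sketch of the assumed semantics: Update(delta) accumulates delta into
  // duration, increments steps, and raises longest_step when delta exceeds
  // it; ResetCurrentCycle() clears the per-cycle values.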
  struct IncrementalMarkingInfos {
    V8_INLINE IncrementalMarkingInfos();
    V8_INLINE void Update(double delta);
    V8_INLINE void ResetCurrentCycle();

    double duration;      // in ms
    double longest_step;  // in ms
    int steps;
  };

  class V8_EXPORT_PRIVATE V8_NODISCARD Scope {
   public:
    enum ScopeId {
#define DEFINE_SCOPE(scope) scope,
      TRACER_SCOPES(DEFINE_SCOPE) TRACER_BACKGROUND_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
      NUMBER_OF_SCOPES,

      FIRST_INCREMENTAL_SCOPE = MC_INCREMENTAL,
      LAST_INCREMENTAL_SCOPE = MC_INCREMENTAL_SWEEPING,
      FIRST_SCOPE = MC_INCREMENTAL,
      NUMBER_OF_INCREMENTAL_SCOPES =
          LAST_INCREMENTAL_SCOPE - FIRST_INCREMENTAL_SCOPE + 1,
      FIRST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_YOUNG_ARRAY_BUFFER_SWEEP,
      LAST_GENERAL_BACKGROUND_SCOPE = BACKGROUND_UNMAPPER,
      FIRST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_EVACUATE_COPY,
      LAST_MC_BACKGROUND_SCOPE = MC_BACKGROUND_SWEEPING,
      FIRST_TOP_MC_SCOPE = MC_CLEAR,
      LAST_TOP_MC_SCOPE = MC_SWEEP,
      FIRST_MINOR_GC_BACKGROUND_SCOPE = MINOR_MC_BACKGROUND_EVACUATE_COPY,
      LAST_MINOR_GC_BACKGROUND_SCOPE = SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL,
      FIRST_BACKGROUND_SCOPE = FIRST_GENERAL_BACKGROUND_SCOPE,
      LAST_BACKGROUND_SCOPE = LAST_MINOR_GC_BACKGROUND_SCOPE
    };

    V8_INLINE Scope(GCTracer* tracer, ScopeId scope, ThreadKind thread_kind);
    V8_INLINE ~Scope();
    Scope(const Scope&) = delete;
    Scope& operator=(const Scope&) = delete;
    static const char* Name(ScopeId id);
    static bool NeedsYoungEpoch(ScopeId id);
    V8_INLINE static constexpr int IncrementalOffset(ScopeId id);

   private:
#if DEBUG
    void AssertMainThread();
#endif  // DEBUG

    GCTracer* const tracer_;
    const ScopeId scope_;
    const ThreadKind thread_kind_;
    const double start_time_;
#ifdef V8_RUNTIME_CALL_STATS
    RuntimeCallTimer timer_;
    RuntimeCallStats* runtime_stats_ = nullptr;
    base::Optional<WorkerThreadRuntimeCallStatsScope>
        runtime_call_stats_scope_;
#endif  // defined(V8_RUNTIME_CALL_STATS)
  };

  class Event {
   public:
    enum Type {
      SCAVENGER = 0,
      MARK_COMPACTOR = 1,
      INCREMENTAL_MARK_COMPACTOR = 2,
      MINOR_MARK_COMPACTOR = 3,
      START = 4
    };

    // Returns true if the event corresponds to a young generation GC.
    V8_INLINE static constexpr bool IsYoungGenerationEvent(Type type);

    // The state diagram for a GC cycle:
    //   (NOT_RUNNING) -----(StartCycle)----->
    //   MARKING       --(StartAtomicPause)-->
    //   ATOMIC        ---(StopAtomicPause)-->
    //   SWEEPING      ------(StopCycle)-----> NOT_RUNNING
    enum class State { NOT_RUNNING, MARKING, ATOMIC, SWEEPING };

    Event(Type type, State state, GarbageCollectionReason gc_reason,
          const char* collector_reason);

    // Returns a string describing the event type.
    const char* TypeName(bool short_name) const;

    // Type of the event.
    Type type;

    // State of the cycle corresponding to the event.
    State state;

    GarbageCollectionReason gc_reason;
    const char* collector_reason;

    // Timestamp set in the constructor.
    double start_time;

    // Timestamp set in the destructor.
    double end_time;

    // Memory reduction flag set.
    bool reduce_memory;

    // Size of objects in heap set in constructor.
    size_t start_object_size;

    // Size of objects in heap set in destructor.
    size_t end_object_size;

    // Size of memory allocated from OS set in constructor.
    size_t start_memory_size;

    // Size of memory allocated from OS set in destructor.
    size_t end_memory_size;

    // Total amount of space either wasted or contained in one of free lists
    // before the current GC.
    size_t start_holes_size;

    // Total amount of space either wasted or contained in one of free lists
    // after the current GC.
    size_t end_holes_size;

    // Size of young objects in constructor.
    size_t young_object_size;

    // Size of survived young objects in destructor.
    size_t survived_young_object_size;

    // Bytes marked incrementally for INCREMENTAL_MARK_COMPACTOR.
    size_t incremental_marking_bytes;

    // Duration (in ms) of incremental marking steps for
    // INCREMENTAL_MARK_COMPACTOR.
    double incremental_marking_duration;

    // Amounts of time (in ms) spent in different scopes during GC.
    double scopes[Scope::NUMBER_OF_SCOPES];

    // Holds details for incremental marking scopes.
    IncrementalMarkingInfos
        incremental_scopes[Scope::NUMBER_OF_INCREMENTAL_SCOPES];
  };

  class RecordGCPhasesInfo {
   public:
    RecordGCPhasesInfo(Heap* heap, GarbageCollector collector);

    enum class Mode { None, Scavenger, Finalize };

    Mode mode;

    // The timer used for a given GC type:
    // - GCScavenger: young generation GC
    // - GCCompactor: full GC
    // - GCFinalizeMC: finalization of incremental full GC
    // - GCFinalizeMCReduceMemory: finalization of incremental full GC with
    //   memory reduction.
    TimedHistogram* type_timer;
    TimedHistogram* type_priority_timer;
  };

  static const int kThroughputTimeFrameMs = 5000;
  static constexpr double kConservativeSpeedInBytesPerMillisecond = 128 * KB;

  static double CombineSpeedsInBytesPerMillisecond(double default_speed,
                                                   double optional_speed);

#ifdef V8_RUNTIME_CALL_STATS
  V8_INLINE static RuntimeCallCounterId RCSCounterFromScope(Scope::ScopeId id);
#endif  // defined(V8_RUNTIME_CALL_STATS)

  explicit GCTracer(Heap* heap);

  V8_INLINE CollectionEpoch CurrentEpoch(Scope::ScopeId id) const;

  // Start and stop an observable pause.
  void StartObservablePause();
  void StopObservablePause();

  // Update the current event if it precedes the start of the observable pause.
  void UpdateCurrentEvent(GarbageCollectionReason gc_reason,
                          const char* collector_reason);

  void UpdateStatistics(GarbageCollector collector);
  void FinalizeCurrentEvent();

  enum class MarkingType { kAtomic, kIncremental };

  // Start and stop a GC cycle (collecting data and reporting results).
  void StartCycle(GarbageCollector collector,
                  GarbageCollectionReason gc_reason,
                  const char* collector_reason, MarkingType marking);
  void StopYoungCycleIfNeeded();
  void StopFullCycleIfNeeded();
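
  // Note: combined with the state diagram in Event, a full cycle is expected
  // to run StartCycle() [MARKING], StartAtomicPause() [ATOMIC],
  // StopAtomicPause() [SWEEPING], and StopFullCycleIfNeeded() [NOT_RUNNING]
  // (ordering inferred from the diagram; illustrative, not normative).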

  // Start and stop a cycle's atomic pause.
  void StartAtomicPause();
  void StopAtomicPause();

  void StartInSafepoint();
  void StopInSafepoint();

  void NotifySweepingCompleted();
  void NotifyFullCppGCCompleted();

  void NotifyYoungCppGCRunning();
  void NotifyYoungCppGCCompleted();

  void NotifyYoungGenerationHandling(
      YoungGenerationHandling young_generation_handling);

#ifdef DEBUG
  V8_INLINE bool IsInObservablePause() const;

  // Checks if the current event is consistent with a collector.
  V8_INLINE bool IsConsistentWithCollector(GarbageCollector collector) const;

  // Checks if the current event corresponds to a full GC cycle whose sweeping
  // has not finalized yet.
  V8_INLINE bool IsSweepingInProgress() const;
#endif

  // Sample and accumulate bytes allocated since the last GC.
  void SampleAllocation(double current_ms, size_t new_space_counter_bytes,
                        size_t old_generation_counter_bytes,
                        size_t embedder_counter_bytes);

  // Log the accumulated new space allocation bytes.
  void AddAllocation(double current_ms);

  void AddCompactionEvent(double duration, size_t live_bytes_compacted);

  void AddSurvivalRatio(double survival_ratio);

  // Log an incremental marking step.
  void AddIncrementalMarkingStep(double duration, size_t bytes);

  // Log an incremental sweeping step.
  void AddIncrementalSweepingStep(double duration);

  // Compute the average incremental marking speed in bytes/millisecond.
  // Returns a conservative value if no events have been recorded.
  double IncrementalMarkingSpeedInBytesPerMillisecond() const;

  // Compute the average embedder speed in bytes/millisecond.
  // Returns a conservative value if no events have been recorded.
  double EmbedderSpeedInBytesPerMillisecond() const;

  // Compute the average scavenge speed in bytes/millisecond.
  // Returns 0 if no events have been recorded.
  double ScavengeSpeedInBytesPerMillisecond(
      ScavengeSpeedMode mode = kForAllObjects) const;

  // Compute the average compaction speed in bytes/millisecond.
  // Returns 0 if not enough events have been recorded.
  double CompactionSpeedInBytesPerMillisecond() const;

  // Compute the average mark-compact speed in bytes/millisecond.
  // Returns 0 if no events have been recorded.
  double MarkCompactSpeedInBytesPerMillisecond() const;

  // Compute the average incremental mark-compact finalize speed in
  // bytes/millisecond.
  // Returns 0 if no events have been recorded.
  double FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;

  // Compute the overall mark-compact speed including incremental steps
  // and the final mark-compact step.
  double CombinedMarkCompactSpeedInBytesPerMillisecond();

  // Allocation throughput in the new space in bytes/millisecond.
  // Returns 0 if no allocation events have been recorded.
  double NewSpaceAllocationThroughputInBytesPerMillisecond(
      double time_ms = 0) const;

  // Allocation throughput in the old generation in bytes/millisecond in the
  // last time_ms milliseconds.
  // Returns 0 if no allocation events have been recorded.
  double OldGenerationAllocationThroughputInBytesPerMillisecond(
      double time_ms = 0) const;

  // Allocation throughput in the embedder in bytes/millisecond in the
  // last time_ms milliseconds. Reported through v8::EmbedderHeapTracer.
  // Returns 0 if no allocation events have been recorded.
  double EmbedderAllocationThroughputInBytesPerMillisecond(
      double time_ms = 0) const;

  // Allocation throughput in heap in bytes/millisecond in the last time_ms
  // milliseconds.
  // Returns 0 if no allocation events have been recorded.
  double AllocationThroughputInBytesPerMillisecond(double time_ms) const;

  // Allocation throughput in heap in bytes/millisecond in the last
  // kThroughputTimeFrameMs milliseconds.
  // Returns 0 if no allocation events have been recorded.
  double CurrentAllocationThroughputInBytesPerMillisecond() const;

  // Allocation throughput in old generation in bytes/millisecond in the last
  // kThroughputTimeFrameMs milliseconds.
  // Returns 0 if no allocation events have been recorded.
  double CurrentOldGenerationAllocationThroughputInBytesPerMillisecond() const;

  // Allocation throughput in the embedder in bytes/millisecond in the last
  // kThroughputTimeFrameMs milliseconds. Reported through
  // v8::EmbedderHeapTracer.
  // Returns 0 if no allocation events have been recorded.
  double CurrentEmbedderAllocationThroughputInBytesPerMillisecond() const;

  // Computes the average survival ratio based on the last recorded survival
  // events.
  // Returns 0 if no events have been recorded.
  double AverageSurvivalRatio() const;

  // Returns true if at least one survival event was recorded.
  bool SurvivalEventsRecorded() const;

  // Discard all recorded survival events.
  void ResetSurvivalEvents();

  void NotifyIncrementalMarkingStart();

  // Returns the average mutator utilization with respect to mark-compact
  // garbage collections. This ignores scavenger GCs.
  double AverageMarkCompactMutatorUtilization() const;
  double CurrentMarkCompactMutatorUtilization() const;

  V8_INLINE void AddScopeSample(Scope::ScopeId id, double duration);

  void RecordGCPhasesHistograms(RecordGCPhasesInfo::Mode mode);

  void RecordEmbedderSpeed(size_t bytes, double duration);

  // Returns the average time between scheduling and invocation of an
  // incremental marking task.
  double AverageTimeToIncrementalMarkingTask() const;
  void RecordTimeToIncrementalMarkingTask(double time_to_task);

#ifdef V8_RUNTIME_CALL_STATS
  V8_INLINE WorkerThreadRuntimeCallStats* worker_thread_runtime_call_stats();
#endif  // defined(V8_RUNTIME_CALL_STATS)

 private:
  FRIEND_TEST(GCTracer, AverageSpeed);
  FRIEND_TEST(GCTracerTest, AllocationThroughput);
  FRIEND_TEST(GCTracerTest, BackgroundScavengerScope);
  FRIEND_TEST(GCTracerTest, BackgroundMinorMCScope);
  FRIEND_TEST(GCTracerTest, BackgroundMajorMCScope);
  FRIEND_TEST(GCTracerTest, EmbedderAllocationThroughput);
  FRIEND_TEST(GCTracerTest, MultithreadedBackgroundScope);
  FRIEND_TEST(GCTracerTest, NewSpaceAllocationThroughput);
  FRIEND_TEST(GCTracerTest, PerGenerationAllocationThroughput);
  FRIEND_TEST(GCTracerTest, PerGenerationAllocationThroughputWithProvidedTime);
  FRIEND_TEST(GCTracerTest, RegularScope);
  FRIEND_TEST(GCTracerTest, IncrementalMarkingDetails);
  FRIEND_TEST(GCTracerTest, IncrementalScope);
  FRIEND_TEST(GCTracerTest, IncrementalMarkingSpeed);
  FRIEND_TEST(GCTracerTest, MutatorUtilization);
  FRIEND_TEST(GCTracerTest, RecordMarkCompactHistograms);
  FRIEND_TEST(GCTracerTest, RecordScavengerHistograms);

  struct BackgroundCounter {
    double total_duration_ms;
  };

  void StopCycle(GarbageCollector collector);

  // Statistics for incremental and background scopes are kept out of the
  // current event and are only copied there by FinalizeCurrentEvent, at
  // StopCycle. This method can be used to correctly access scopes before that
  // happens. Note: when accessing a background scope via this method, the
  // caller is responsible for avoiding data races, e.g., by acquiring
  // background_counter_mutex_.
  V8_INLINE constexpr double current_scope(Scope::ScopeId id) const;

  V8_INLINE constexpr const IncrementalMarkingInfos& incremental_scope(
      Scope::ScopeId id) const;

  // Returns the average speed of the events in the buffer.
  // If the buffer is empty, the result is 0.
  // Otherwise, the result is between 1 byte/ms and 1 GB/ms.
  static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer);
  static double AverageSpeed(const base::RingBuffer<BytesAndDuration>& buffer,
                             const BytesAndDuration& initial, double time_ms);
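  // For instance (illustrative, assuming the average is total bytes divided
  // by total duration): samples of (1 MB, 1 ms) and (3 MB, 1 ms) yield
  // (1 + 3) MB / (1 + 1) ms = 2 MB/ms, clamped into [1 byte/ms, 1 GB/ms].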

  void ResetForTesting();
  void ResetIncrementalMarkingCounters();
  void RecordIncrementalMarkingSpeed(size_t bytes, double duration);
  void RecordMutatorUtilization(double mark_compactor_end_time,
                                double mark_compactor_duration);

  // Update counters for an entire full GC cycle. Exact accounting of events
  // within a GC is not necessary, which is why the recording takes place at
  // the end of the atomic pause.
  void RecordGCSumCounters();

  V8_INLINE double MonotonicallyIncreasingTimeInMs();

  // Print one detailed trace line in name=value format.
  // TODO(ernstm): Move to Heap.
  void PrintNVP() const;

  // Print one trace line.
  // TODO(ernstm): Move to Heap.
  void Print() const;

  // Prints a line and also adds it to the heap's ring buffer so that
  // it can be included in later crash dumps.
  void PRINTF_FORMAT(2, 3) Output(const char* format, ...) const;

  void FetchBackgroundCounters(int first_scope, int last_scope);
  void FetchBackgroundMinorGCCounters();
  void FetchBackgroundMarkCompactCounters();
  void FetchBackgroundGeneralCounters();

  void ReportFullCycleToRecorder();
  void ReportIncrementalMarkingStepToRecorder(double v8_duration);
  void ReportIncrementalSweepingStepToRecorder(double v8_duration);
  void ReportYoungCycleToRecorder();

  // Pointer to the heap that owns this tracer.
  Heap* heap_;

  // Current tracer event. Populated during Start/Stop cycle. Valid after
  // Stop() has returned.
  Event current_;

  // Previous tracer event.
  Event previous_;

  // The starting time of the observable pause or 0.0 if we're not inside it.
  double start_of_observable_pause_ = 0.0;

  // We need two epochs, since there can be scavenges during incremental
  // marking.
  CollectionEpoch epoch_young_ = 0;
  CollectionEpoch epoch_full_ = 0;

  // Size of incremental marking steps (in bytes) accumulated since the end of
  // the last mark-compact GC.
  size_t incremental_marking_bytes_;

  // Duration (in ms) of incremental marking steps since the end of the last
  // mark-compact event.
  double incremental_marking_duration_;

  double incremental_marking_start_time_;

  double recorded_incremental_marking_speed_;

  double average_time_to_incremental_marking_task_ = 0.0;

  double recorded_embedder_speed_ = 0.0;

  // Incremental scopes carry more information than just the duration. The
  // infos here are merged back upon starting/stopping the GC tracer.
  IncrementalMarkingInfos
      incremental_scopes_[Scope::NUMBER_OF_INCREMENTAL_SCOPES];

  // Timestamp and allocation counter at the last sampled allocation event.
  double allocation_time_ms_;
  size_t new_space_allocation_counter_bytes_;
  size_t old_generation_allocation_counter_bytes_;
  size_t embedder_allocation_counter_bytes_;

  // Accumulated duration (in ms) and allocated bytes since the last GC.
  double allocation_duration_since_gc_;
  size_t new_space_allocation_in_bytes_since_gc_;
  size_t old_generation_allocation_in_bytes_since_gc_;
  size_t embedder_allocation_in_bytes_since_gc_;

  double combined_mark_compact_speed_cache_;

  // Counts how many tracers were started without stopping.
  int start_counter_;

  // Used for computing average mutator utilization.
  double average_mutator_duration_;
  double average_mark_compact_duration_;
  double current_mark_compact_mutator_utilization_;
  double previous_mark_compact_end_time_;

  base::RingBuffer<BytesAndDuration> recorded_minor_gcs_total_;
  base::RingBuffer<BytesAndDuration> recorded_minor_gcs_survived_;
  base::RingBuffer<BytesAndDuration> recorded_compactions_;
  base::RingBuffer<BytesAndDuration> recorded_incremental_mark_compacts_;
  base::RingBuffer<BytesAndDuration> recorded_mark_compacts_;
  base::RingBuffer<BytesAndDuration> recorded_new_generation_allocations_;
  base::RingBuffer<BytesAndDuration> recorded_old_generation_allocations_;
  base::RingBuffer<BytesAndDuration> recorded_embedder_generation_allocations_;
  base::RingBuffer<double> recorded_survival_ratios_;

  // A full GC cycle stops only when both v8 and cppgc (if available) GCs have
  // finished sweeping.
  bool notified_sweeping_completed_ = false;
  bool notified_full_cppgc_completed_ = false;
  // Similar to full GCs, a young GC cycle stops only when both v8 and cppgc
  // GCs have finished sweeping.
  bool notified_young_cppgc_completed_ = false;
  // Keeps track of whether the young cppgc GC was scheduled (in contrast to
  // full cycles, cppgc is not always scheduled for young cycles).
  bool notified_young_cppgc_running_ = false;

  // When a full GC cycle is interrupted by a young generation GC cycle, the
  // |previous_| event is used as temporary storage for the |current_| event
  // that corresponded to the full GC cycle, and this field is set to true.
  bool young_gc_while_full_gc_ = false;

  v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalMark
      incremental_mark_batched_events_;
  v8::metrics::GarbageCollectionFullMainThreadBatchedIncrementalSweep
      incremental_sweep_batched_events_;

  mutable base::Mutex background_counter_mutex_;
  BackgroundCounter background_counter_[Scope::NUMBER_OF_SCOPES];
};

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_GC_TRACER_H_