// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_CPPGC_STATS_COLLECTOR_H_
#define V8_HEAP_CPPGC_STATS_COLLECTOR_H_

#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <atomic>
#include <limits>
#include <memory>
#include <type_traits>
#include <vector>

#include "include/cppgc/platform.h"
#include "src/base/atomicops.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/garbage-collector.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/trace-event.h"

namespace cppgc {
namespace internal {

// Histogram scopes contribute to histograms as well as to traces and metrics.
// Other scopes contribute only to traces and metrics.
#define CPPGC_FOR_ALL_HISTOGRAM_SCOPES(V) \
  V(AtomicMark)                           \
  V(AtomicWeak)                           \
  V(AtomicCompact)                        \
  V(AtomicSweep)                          \
  V(IncrementalMark)                      \
  V(IncrementalSweep)

#define CPPGC_FOR_ALL_SCOPES(V)             \
  V(MarkIncrementalStart)                   \
  V(MarkIncrementalFinalize)                \
  V(MarkAtomicPrologue)                     \
  V(MarkAtomicEpilogue)                     \
  V(MarkTransitiveClosure)                  \
  V(MarkTransitiveClosureWithDeadline)      \
  V(MarkFlushEphemerons)                    \
  V(MarkOnAllocation)                       \
  V(MarkProcessBailOutObjects)              \
  V(MarkProcessMarkingWorklist)             \
  V(MarkProcessWriteBarrierWorklist)        \
  V(MarkProcessNotFullyconstructedWorklist) \
  V(MarkProcessEphemerons)                  \
  V(MarkVisitRoots)                         \
  V(MarkVisitNotFullyConstructedObjects)    \
  V(MarkVisitPersistents)                   \
  V(MarkVisitCrossThreadPersistents)        \
  V(MarkVisitStack)                         \
  V(MarkVisitRememberedSets)                \
  V(SweepInvokePreFinalizers)               \
  V(SweepIdleStep)                          \
  V(SweepInTask)                            \
  V(SweepOnAllocation)                      \
  V(SweepFinalize)

#define CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(V) \
  V(ConcurrentMark)                                  \
  V(ConcurrentSweep)

#define CPPGC_FOR_ALL_CONCURRENT_SCOPES(V) V(ConcurrentMarkProcessEphemerons)

// Sink for various time and memory statistics.
class V8_EXPORT_PRIVATE StatsCollector final {
  using IsForcedGC = GarbageCollector::Config::IsForcedGC;

 public:
  using CollectionType = GarbageCollector::Config::CollectionType;

#if defined(CPPGC_DECLARE_ENUM)
  static_assert(false, "CPPGC_DECLARE_ENUM macro is already defined");
#endif

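  // Note: histogram scope ids are declared before the kNumHistogram*ScopeIds
  // sentinels, so ids below kNumHistogramScopeIds (or its concurrent
  // counterpart) identify scopes that are additionally reported to histograms;
  // see InternalScope::IncreaseScopeTime().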
  enum ScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
    CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_DECLARE_ENUM)
        kNumHistogramScopeIds,
    CPPGC_FOR_ALL_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
        kNumScopeIds,
  };

  enum ConcurrentScopeId {
#define CPPGC_DECLARE_ENUM(name) k##name,
    CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
        kNumHistogramConcurrentScopeIds,
    CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_DECLARE_ENUM)
#undef CPPGC_DECLARE_ENUM
        kNumConcurrentScopeIds
  };

  // POD to hold interesting data accumulated during a garbage collection cycle.
  //
  // The event is always fully populated when looking at previous events but
  // may only be partially populated when looking at the current event.
  struct Event final {
    V8_EXPORT_PRIVATE explicit Event();

    v8::base::TimeDelta scope_data[kNumHistogramScopeIds];
    v8::base::Atomic32 concurrent_scope_data[kNumHistogramConcurrentScopeIds]{
        0};

    size_t epoch = -1;
    CollectionType collection_type = CollectionType::kMajor;
    IsForcedGC is_forced_gc = IsForcedGC::kNotForced;
    // Marked bytes collected during marking.
    size_t marked_bytes = 0;
    size_t object_size_before_sweep_bytes = -1;
    size_t memory_size_before_sweep_bytes = -1;
  };

 private:
#if defined(CPPGC_CASE)
  static_assert(false, "CPPGC_CASE macro is already defined");
#endif

  constexpr static const char* GetScopeName(ScopeId id, CollectionType type) {
    switch (id) {
#define CPPGC_CASE(name)                                   \
  case k##name:                                            \
    return type == CollectionType::kMajor ? "CppGC." #name \
                                          : "CppGC." #name ".Minor";
      CPPGC_FOR_ALL_HISTOGRAM_SCOPES(CPPGC_CASE)
      CPPGC_FOR_ALL_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
      default:
        return nullptr;
    }
  }

  constexpr static const char* GetScopeName(ConcurrentScopeId id,
                                            CollectionType type) {
    switch (id) {
#define CPPGC_CASE(name)                                   \
  case k##name:                                            \
    return type == CollectionType::kMajor ? "CppGC." #name \
                                          : "CppGC." #name ".Minor";
      CPPGC_FOR_ALL_HISTOGRAM_CONCURRENT_SCOPES(CPPGC_CASE)
      CPPGC_FOR_ALL_CONCURRENT_SCOPES(CPPGC_CASE)
#undef CPPGC_CASE
      default:
        return nullptr;
    }
  }

  enum TraceCategory { kEnabled, kDisabled };
  enum ScopeContext { kMutatorThread, kConcurrentThread };

  // Trace a particular scope. Will emit a trace event and record the time in
  // the corresponding StatsCollector.
  template <TraceCategory trace_category, ScopeContext scope_category>
  class V8_NODISCARD InternalScope {
    using ScopeIdType = std::conditional_t<scope_category == kMutatorThread,
                                           ScopeId, ConcurrentScopeId>;

   public:
    template <typename... Args>
    InternalScope(StatsCollector* stats_collector, ScopeIdType scope_id,
                  Args... args)
        : stats_collector_(stats_collector),
          start_time_(v8::base::TimeTicks::Now()),
          scope_id_(scope_id) {
      DCHECK_LE(0, scope_id_);
      DCHECK_LT(static_cast<int>(scope_id_),
                scope_category == kMutatorThread
                    ? static_cast<int>(kNumScopeIds)
                    : static_cast<int>(kNumConcurrentScopeIds));
      DCHECK_NE(static_cast<int>(scope_id_),
                scope_category == kMutatorThread
                    ? static_cast<int>(kNumHistogramScopeIds)
                    : static_cast<int>(kNumHistogramConcurrentScopeIds));
      StartTrace(args...);
    }

    ~InternalScope() {
      StopTrace();
      IncreaseScopeTime();
    }

    InternalScope(const InternalScope&) = delete;
    InternalScope& operator=(const InternalScope&) = delete;

    void DecreaseStartTimeForTesting(v8::base::TimeDelta delta) {
      start_time_ -= delta;
    }

   private:
    void* operator new(size_t, void*) = delete;
    void* operator new(size_t) = delete;

    inline constexpr static const char* TraceCategory();

    template <typename... Args>
    inline void StartTrace(Args... args);
    inline void StopTrace();

    inline void StartTraceImpl();
    template <typename Value1>
    inline void StartTraceImpl(const char* k1, Value1 v1);
    template <typename Value1, typename Value2>
    inline void StartTraceImpl(const char* k1, Value1 v1, const char* k2,
                               Value2 v2);
    inline void StopTraceImpl();

    inline void IncreaseScopeTime();

    StatsCollector* const stats_collector_;
    v8::base::TimeTicks start_time_;
    const ScopeIdType scope_id_;
  };

 public:
  using DisabledScope = InternalScope<kDisabled, kMutatorThread>;
  using EnabledScope = InternalScope<kEnabled, kMutatorThread>;
  using DisabledConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
  using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;
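
  // Usage sketch (illustrative, not part of this header): scopes are created
  // on the stack so that the destructor attributes the elapsed time to the
  // given scope id and, for enabled categories, emits matching trace events:
  //   {
  //     StatsCollector::EnabledScope stats_scope(
  //         stats_collector, StatsCollector::kAtomicMark);
  //     // ... perform atomic marking ...
  //   }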

  // Observer for allocated object size. May, e.g., be used to implement heap
  // growing heuristics. Observers may register/unregister observers at any
  // time, including while they are being invoked.
  class AllocationObserver {
   public:
    // Called after observing at least
    // StatsCollector::kAllocationThresholdBytes changed bytes through
    // allocation or explicit free. Reports both negative and positive
    // increments to allow the observer to decide whether absolute values or
    // only the deltas are interesting.
    //
    // May trigger GC.
    virtual void AllocatedObjectSizeIncreased(size_t) {}
    virtual void AllocatedObjectSizeDecreased(size_t) {}

    // Called when the exact allocated object size is known. In practice, this
    // is after marking when marked bytes == allocated bytes.
    //
    // Must not trigger GC synchronously.
    virtual void ResetAllocatedObjectSize(size_t) {}

    // Called upon allocating/releasing chunks of memory (e.g. pages) that can
    // contain objects.
    //
    // Must not trigger GC.
    virtual void AllocatedSizeIncreased(size_t) {}
    virtual void AllocatedSizeDecreased(size_t) {}
  };
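
  // Illustrative sketch (assumed names, not part of this header): a heap
  // growing heuristic could observe object size changes as
  //   class GrowingHeuristic final
  //       : public StatsCollector::AllocationObserver {
  //     void AllocatedObjectSizeIncreased(size_t delta) override {
  //       bytes_since_gc_ += delta;  // may decide to trigger a GC here
  //     }
  //     size_t bytes_since_gc_ = 0;
  //   };
  // and be registered via RegisterObserver()/UnregisterObserver() below.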

  // Observers are implemented using virtual calls. Avoid notifications below
  // reasonably interesting sizes.
  static constexpr size_t kAllocationThresholdBytes = 1024;

  explicit StatsCollector(Platform*);
  StatsCollector(const StatsCollector&) = delete;
  StatsCollector& operator=(const StatsCollector&) = delete;

  void RegisterObserver(AllocationObserver*);
  void UnregisterObserver(AllocationObserver*);

  void NotifyAllocation(size_t);
  void NotifyExplicitFree(size_t);
  // Safepoints should only be invoked when garbage collections are possible.
  // This is necessary as increments and decrements are reported as close to
  // their actual allocation/reclamation as possible.
  void NotifySafePointForConservativeCollection();

  void NotifySafePointForTesting();

  // Indicates a new garbage collection cycle.
  void NotifyMarkingStarted(CollectionType, IsForcedGC);
  // Indicates that marking of the current garbage collection cycle is
  // completed.
  void NotifyMarkingCompleted(size_t marked_bytes);
  // Indicates the end of a garbage collection cycle. This means that sweeping
  // is finished at this point.
  void NotifySweepingCompleted();
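  // Expected notification order over a cycle (sketch):
  //   NotifyMarkingStarted() -> NotifyMarkingCompleted(marked_bytes) ->
  //   NotifySweepingCompleted(), with marking/sweeping scopes recorded in
  //   between; see current_ and previous_ below.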

  size_t allocated_memory_size() const;
  // Size of live objects in bytes on the heap. Based on the most recent marked
  // bytes and the bytes allocated since last marking.
  size_t allocated_object_size() const;

  // Returns the overall marked bytes count, i.e. if young generation is
  // enabled, it returns the accumulated number. Should not be called during
  // marking.
  size_t marked_bytes() const;

  // Returns the marked bytes for the current cycle. Should only be called
  // within a GC cycle.
  size_t marked_bytes_on_current_cycle() const;

  // Returns the overall duration of the most recent marking phase. Should not
  // be called during marking.
  v8::base::TimeDelta marking_time() const;

  double GetRecentAllocationSpeedInBytesPerMs() const;

  const Event& GetPreviousEventForTesting() const { return previous_; }

  void NotifyAllocatedMemory(int64_t);
  void NotifyFreedMemory(int64_t);

  void IncrementDiscardedMemory(size_t);
  void DecrementDiscardedMemory(size_t);
  void ResetDiscardedMemory();
  size_t discarded_memory_size() const;
  size_t resident_memory_size() const;

  void SetMetricRecorder(std::unique_ptr<MetricRecorder> histogram_recorder) {
    metric_recorder_ = std::move(histogram_recorder);
  }

  MetricRecorder* GetMetricRecorder() const { return metric_recorder_.get(); }

 private:
  enum class GarbageCollectionState : uint8_t {
    kNotRunning,
    kMarking,
    kSweeping
  };

  void RecordHistogramSample(ScopeId, v8::base::TimeDelta);
  void RecordHistogramSample(ConcurrentScopeId, v8::base::TimeDelta) {}

  // Invokes |callback| for all registered observers.
  template <typename Callback>
  void ForAllAllocationObservers(Callback callback);

  void AllocatedObjectSizeSafepointImpl();

  // Allocated bytes since the end of marking. These bytes are reset after
  // marking, as they are then accounted for in marked_bytes. May be negative
  // in case an object was explicitly freed that was marked as live in the
  // previous cycle.
  int64_t allocated_bytes_since_end_of_marking_ = 0;
  v8::base::TimeTicks time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
  // Counters for allocation and free. The individual values are never negative
  // but their delta may be negative, for the same reason that the overall
  // allocated_bytes_since_end_of_marking_ may be negative. Keep integer
  // arithmetic for simplicity.
  int64_t allocated_bytes_since_safepoint_ = 0;
  int64_t explicitly_freed_bytes_since_safepoint_ = 0;
#ifdef CPPGC_VERIFY_HEAP
  // Tracks live bytes for overflows.
  size_t tracked_live_bytes_ = 0;
#endif  // CPPGC_VERIFY_HEAP

  // The number of bytes marked so far. For the young generation (with sticky
  // bits), this keeps track of marked bytes across multiple GC cycles.
  size_t marked_bytes_so_far_ = 0;

  int64_t memory_allocated_bytes_ = 0;
  int64_t memory_freed_bytes_since_end_of_marking_ = 0;
  std::atomic<size_t> discarded_bytes_{0};

  // Vector to allow fast iteration of observers. Registering/unregistering
  // only happens on startup/teardown.
  std::vector<AllocationObserver*> allocation_observers_;
  bool allocation_observer_deleted_ = false;

  GarbageCollectionState gc_state_ = GarbageCollectionState::kNotRunning;

  // The event being filled by the current GC cycle between NotifyMarkingStarted
  // and NotifySweepingCompleted.
  Event current_;
  // The previous GC event which is populated at NotifySweepingCompleted.
  Event previous_;

  std::unique_ptr<MetricRecorder> metric_recorder_;

  // |platform_| is used by the TRACE_EVENT_* macros.
  Platform* platform_;
};

template <typename Callback>
void StatsCollector::ForAllAllocationObservers(Callback callback) {
  // Iterate using indices to allow push_back() of new observers.
  for (size_t i = 0; i < allocation_observers_.size(); ++i) {
    auto* observer = allocation_observers_[i];
    if (observer) {
      callback(observer);
    }
  }
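  // Erase any null slots left behind by unregistered observers
  // (see allocation_observer_deleted_).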
  if (allocation_observer_deleted_) {
    allocation_observers_.erase(
        std::remove(allocation_observers_.begin(), allocation_observers_.end(),
                    nullptr),
        allocation_observers_.end());
    allocation_observer_deleted_ = false;
  }
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
constexpr const char*
StatsCollector::InternalScope<trace_category, scope_category>::TraceCategory() {
  switch (trace_category) {
    case kEnabled:
      return "cppgc";
    case kDisabled:
      return TRACE_DISABLED_BY_DEFAULT("cppgc");
  }
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename... Args>
void StatsCollector::InternalScope<trace_category, scope_category>::StartTrace(
    Args... args) {
  // Top-level scopes that contribute to histograms should always be enabled.
  DCHECK_IMPLIES(static_cast<int>(scope_id_) <
                     (scope_category == kMutatorThread
                          ? static_cast<int>(kNumHistogramScopeIds)
                          : static_cast<int>(kNumHistogramConcurrentScopeIds)),
                 trace_category == StatsCollector::TraceCategory::kEnabled);
  if (trace_category == StatsCollector::TraceCategory::kEnabled)
    StartTraceImpl(args...);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StopTrace() {
  if (trace_category == StatsCollector::TraceCategory::kEnabled)
    StopTraceImpl();
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StartTraceImpl() {
  TRACE_EVENT_BEGIN0(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type));
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename Value1>
void StatsCollector::InternalScope<
    trace_category, scope_category>::StartTraceImpl(const char* k1, Value1 v1) {
  TRACE_EVENT_BEGIN1(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type), k1,
      v1);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
template <typename Value1, typename Value2>
void StatsCollector::InternalScope<
    trace_category, scope_category>::StartTraceImpl(const char* k1, Value1 v1,
                                                    const char* k2, Value2 v2) {
  TRACE_EVENT_BEGIN2(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type), k1,
      v1, k2, v2);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::StopTraceImpl() {
  TRACE_EVENT_END2(
      TraceCategory(),
      GetScopeName(scope_id_, stats_collector_->current_.collection_type),
      "epoch", stats_collector_->current_.epoch, "forced",
      stats_collector_->current_.is_forced_gc == IsForcedGC::kForced);
}

template <StatsCollector::TraceCategory trace_category,
          StatsCollector::ScopeContext scope_category>
void StatsCollector::InternalScope<trace_category,
                                   scope_category>::IncreaseScopeTime() {
  DCHECK_NE(GarbageCollectionState::kNotRunning, stats_collector_->gc_state_);
  // Only record top level scopes.
  if (static_cast<int>(scope_id_) >=
      (scope_category == kMutatorThread
           ? static_cast<int>(kNumHistogramScopeIds)
           : static_cast<int>(kNumHistogramConcurrentScopeIds)))
    return;
  v8::base::TimeDelta time = v8::base::TimeTicks::Now() - start_time_;
  if (scope_category == StatsCollector::ScopeContext::kMutatorThread) {
    stats_collector_->current_.scope_data[scope_id_] += time;
    if (stats_collector_->metric_recorder_)
      stats_collector_->RecordHistogramSample(scope_id_, time);
    return;
  }
  // scope_category == StatsCollector::ScopeContext::kConcurrentThread
  using Atomic32 = v8::base::Atomic32;
  const int64_t us = time.InMicroseconds();
  DCHECK_LE(us, std::numeric_limits<Atomic32>::max());
  v8::base::Relaxed_AtomicIncrement(
      &stats_collector_->current_.concurrent_scope_data[scope_id_],
      static_cast<Atomic32>(us));
}

}  // namespace internal
}  // namespace cppgc

#endif  // V8_HEAP_CPPGC_STATS_COLLECTOR_H_