// Copyright 2020 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/cppgc/stats-collector.h"

#include <algorithm>
#include <atomic>
#include <cmath>

#include "src/base/atomicops.h"
#include "src/base/logging.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc/metric-recorder.h"

namespace cppgc {
namespace internal {

// static
constexpr size_t StatsCollector::kAllocationThresholdBytes;

StatsCollector::StatsCollector(Platform* platform) : platform_(platform) {
  USE(platform_);
}

void StatsCollector::RegisterObserver(AllocationObserver* observer) {
  DCHECK_EQ(allocation_observers_.end(),
            std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer));
  allocation_observers_.push_back(observer);
}

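// Unregistering only clears the observer's slot instead of erasing it, so
// that iteration over `allocation_observers_` that may currently be in
// progress stays valid. The vector is presumably compacted later once
// `allocation_observer_deleted_` has been set.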
void StatsCollector::UnregisterObserver(AllocationObserver* observer) {
  auto it = std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer);
  DCHECK_NE(allocation_observers_.end(), it);
  *it = nullptr;
  allocation_observer_deleted_ = true;
}

void StatsCollector::NotifyAllocation(size_t bytes) {
  // The current GC may not have been started. This is ok as recording
  // considers the whole time range between garbage collections.
  allocated_bytes_since_safepoint_ += bytes;
#ifdef CPPGC_VERIFY_HEAP
  DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
  tracked_live_bytes_ += bytes;
#endif  // CPPGC_VERIFY_HEAP
}

void StatsCollector::NotifyExplicitFree(size_t bytes) {
  // See NotifyAllocation() for the lifetime of the counter.
  explicitly_freed_bytes_since_safepoint_ += bytes;
#ifdef CPPGC_VERIFY_HEAP
  DCHECK_GE(tracked_live_bytes_, bytes);
  tracked_live_bytes_ -= bytes;
#endif  // CPPGC_VERIFY_HEAP
}

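// Observers are only notified once the net allocation delta since the last
// safepoint crosses kAllocationThresholdBytes, which amortizes the observer
// overhead over many small allocations and frees.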
void StatsCollector::NotifySafePointForConservativeCollection() {
  if (std::abs(allocated_bytes_since_safepoint_ -
               explicitly_freed_bytes_since_safepoint_) >=
      static_cast<int64_t>(kAllocationThresholdBytes)) {
    AllocatedObjectSizeSafepointImpl();
  }
}

void StatsCollector::NotifySafePointForTesting() {
  AllocatedObjectSizeSafepointImpl();
}

void StatsCollector::AllocatedObjectSizeSafepointImpl() {
  allocated_bytes_since_end_of_marking_ +=
      static_cast<int64_t>(allocated_bytes_since_safepoint_) -
      static_cast<int64_t>(explicitly_freed_bytes_since_safepoint_);

  // Save the epoch to avoid clearing counters when a GC happened, see below.
  const auto saved_epoch = current_.epoch;

  // These observer methods may start or finalize a GC. In case they trigger a
  // final GC pause, the delta counters are reset there and the remaining
  // observers are notified with '0' updates.
  ForAllAllocationObservers([this](AllocationObserver* observer) {
    // Recompute the delta here so that a GC finalization is able to clear the
    // delta for other observer calls.
    int64_t delta = allocated_bytes_since_safepoint_ -
                    explicitly_freed_bytes_since_safepoint_;
    if (delta < 0) {
      observer->AllocatedObjectSizeDecreased(static_cast<size_t>(-delta));
    } else {
      observer->AllocatedObjectSizeIncreased(static_cast<size_t>(delta));
    }
  });
  // Only clear the counters when no garbage collection happened. If a garbage
  // collection was triggered from the callbacks, the counters have already
  // been cleared by `NotifyMarkingCompleted()`. In addition, atomic sweeping
  // may have already allocated new memory which would be dropped from
  // accounting if the counters were cleared here.
  if (saved_epoch == current_.epoch) {
    allocated_bytes_since_safepoint_ = 0;
    explicitly_freed_bytes_since_safepoint_ = 0;
  }
}

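// Every Event is stamped with a monotonically increasing epoch. The epoch is
// used to detect whether a garbage collection (which installs a new Event)
// happened while observers were being notified.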
StatsCollector::Event::Event() {
  static std::atomic<size_t> epoch_counter{0};
  epoch = epoch_counter.fetch_add(1);
}

void StatsCollector::NotifyMarkingStarted(CollectionType collection_type,
                                          IsForcedGC is_forced_gc) {
  DCHECK_EQ(GarbageCollectionState::kNotRunning, gc_state_);
  current_.collection_type = collection_type;
  current_.is_forced_gc = is_forced_gc;
  gc_state_ = GarbageCollectionState::kMarking;
}

void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
  DCHECK_EQ(GarbageCollectionState::kMarking, gc_state_);
  gc_state_ = GarbageCollectionState::kSweeping;
  current_.marked_bytes = marked_bytes;
  current_.object_size_before_sweep_bytes =
      marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_ +
      allocated_bytes_since_safepoint_ -
      explicitly_freed_bytes_since_safepoint_;
  allocated_bytes_since_safepoint_ = 0;
  explicitly_freed_bytes_since_safepoint_ = 0;

  if (current_.collection_type == CollectionType::kMajor)
    marked_bytes_so_far_ = 0;
  marked_bytes_so_far_ += marked_bytes;

#ifdef CPPGC_VERIFY_HEAP
  tracked_live_bytes_ = marked_bytes_so_far_;
#endif  // CPPGC_VERIFY_HEAP

  DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
  memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
  current_.memory_size_before_sweep_bytes = memory_allocated_bytes_;
  memory_freed_bytes_since_end_of_marking_ = 0;

  ForAllAllocationObservers([this](AllocationObserver* observer) {
    observer->ResetAllocatedObjectSize(marked_bytes_so_far_);
  });

  // HeapGrowing uses the fields below to estimate the allocation rate during
  // the execution of ResetAllocatedObjectSize(), which is why they are only
  // reset afterwards.
  allocated_bytes_since_end_of_marking_ = 0;
  time_of_last_end_of_marking_ = v8::base::TimeTicks::Now();
}

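// Returns the allocation rate since the last end of marking in bytes/ms.
// Returns 0 if no time has elapsed, avoiding a division by zero.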
double StatsCollector::GetRecentAllocationSpeedInBytesPerMs() const {
  v8::base::TimeTicks current_time = v8::base::TimeTicks::Now();
  DCHECK_LE(time_of_last_end_of_marking_, current_time);
  if (time_of_last_end_of_marking_ == current_time) return 0;
  return allocated_bytes_since_end_of_marking_ /
         (current_time - time_of_last_end_of_marking_).InMillisecondsF();
}

namespace {

int64_t SumPhases(const MetricRecorder::GCCycle::Phases& phases) {
  return phases.mark_duration_us + phases.weak_duration_us +
         phases.compact_duration_us + phases.sweep_duration_us;
}

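// Assembles a GCCycle event for the metric recorder. Per-phase durations are
// aggregated into main-thread (atomic + incremental) and total (main-thread +
// concurrent) buckets, and object/memory statistics as well as collection
// rate and efficiency are derived from the before/after/freed byte counts.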
MetricRecorder::GCCycle GetCycleEventForMetricRecorder(
    StatsCollector::CollectionType type, int64_t atomic_mark_us,
    int64_t atomic_weak_us, int64_t atomic_compact_us, int64_t atomic_sweep_us,
    int64_t incremental_mark_us, int64_t incremental_sweep_us,
    int64_t concurrent_mark_us, int64_t concurrent_sweep_us,
    int64_t objects_before_bytes, int64_t objects_after_bytes,
    int64_t objects_freed_bytes, int64_t memory_before_bytes,
    int64_t memory_after_bytes, int64_t memory_freed_bytes) {
  MetricRecorder::GCCycle event;
  event.type = (type == StatsCollector::CollectionType::kMajor)
                   ? MetricRecorder::GCCycle::Type::kMajor
                   : MetricRecorder::GCCycle::Type::kMinor;
  // MainThread.Incremental:
  event.main_thread_incremental.mark_duration_us = incremental_mark_us;
  event.main_thread_incremental.sweep_duration_us = incremental_sweep_us;
  // MainThread.Atomic:
  event.main_thread_atomic.mark_duration_us = atomic_mark_us;
  event.main_thread_atomic.weak_duration_us = atomic_weak_us;
  event.main_thread_atomic.compact_duration_us = atomic_compact_us;
  event.main_thread_atomic.sweep_duration_us = atomic_sweep_us;
  // MainThread:
  event.main_thread.mark_duration_us =
      event.main_thread_atomic.mark_duration_us +
      event.main_thread_incremental.mark_duration_us;
  event.main_thread.weak_duration_us =
      event.main_thread_atomic.weak_duration_us;
  event.main_thread.compact_duration_us =
      event.main_thread_atomic.compact_duration_us;
  event.main_thread.sweep_duration_us =
      event.main_thread_atomic.sweep_duration_us +
      event.main_thread_incremental.sweep_duration_us;
  // Total:
  event.total.mark_duration_us =
      event.main_thread.mark_duration_us + concurrent_mark_us;
  event.total.weak_duration_us = event.main_thread.weak_duration_us;
  event.total.compact_duration_us = event.main_thread.compact_duration_us;
  event.total.sweep_duration_us =
      event.main_thread.sweep_duration_us + concurrent_sweep_us;
  // Objects:
  event.objects.before_bytes = objects_before_bytes;
  event.objects.after_bytes = objects_after_bytes;
  event.objects.freed_bytes = objects_freed_bytes;
  // Memory:
  event.memory.before_bytes = memory_before_bytes;
  event.memory.after_bytes = memory_after_bytes;
  event.memory.freed_bytes = memory_freed_bytes;
  // Collection Rate:
  event.collection_rate_in_percent =
      static_cast<double>(event.objects.after_bytes) /
      event.objects.before_bytes;
  // Efficiency:
  event.efficiency_in_bytes_per_us =
      static_cast<double>(event.objects.freed_bytes) / SumPhases(event.total);
  event.main_thread_efficiency_in_bytes_per_us =
      static_cast<double>(event.objects.freed_bytes) /
      SumPhases(event.main_thread);
  return event;
}

}  // namespace

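// Sweeping completion finishes the cycle: the current Event becomes the
// previous one, a fresh Event (with a new epoch) is installed, and, if a
// metric recorder is attached, the completed cycle is reported as a single
// GCCycle event.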
void StatsCollector::NotifySweepingCompleted() {
  DCHECK_EQ(GarbageCollectionState::kSweeping, gc_state_);
  gc_state_ = GarbageCollectionState::kNotRunning;
  previous_ = std::move(current_);
  current_ = Event();
  if (metric_recorder_) {
    MetricRecorder::GCCycle event = GetCycleEventForMetricRecorder(
        previous_.collection_type,
        previous_.scope_data[kAtomicMark].InMicroseconds(),
        previous_.scope_data[kAtomicWeak].InMicroseconds(),
        previous_.scope_data[kAtomicCompact].InMicroseconds(),
        previous_.scope_data[kAtomicSweep].InMicroseconds(),
        previous_.scope_data[kIncrementalMark].InMicroseconds(),
        previous_.scope_data[kIncrementalSweep].InMicroseconds(),
        previous_.concurrent_scope_data[kConcurrentMark],
        previous_.concurrent_scope_data[kConcurrentSweep],
        previous_.object_size_before_sweep_bytes /* objects_before */,
        marked_bytes_so_far_ /* objects_after */,
        previous_.object_size_before_sweep_bytes -
            marked_bytes_so_far_ /* objects_freed */,
        previous_.memory_size_before_sweep_bytes /* memory_before */,
        previous_.memory_size_before_sweep_bytes -
            memory_freed_bytes_since_end_of_marking_ /* memory_after */,
        memory_freed_bytes_since_end_of_marking_ /* memory_freed */);
    metric_recorder_->AddMainThreadEvent(event);
  }
}

size_t StatsCollector::allocated_memory_size() const {
  return memory_allocated_bytes_ - memory_freed_bytes_since_end_of_marking_;
}

size_t StatsCollector::allocated_object_size() const {
  return marked_bytes_so_far_ + allocated_bytes_since_end_of_marking_;
}

size_t StatsCollector::marked_bytes() const {
  DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
  return marked_bytes_so_far_;
}

size_t StatsCollector::marked_bytes_on_current_cycle() const {
  DCHECK_NE(GarbageCollectionState::kNotRunning, gc_state_);
  return current_.marked_bytes;
}

v8::base::TimeDelta StatsCollector::marking_time() const {
  DCHECK_NE(GarbageCollectionState::kMarking, gc_state_);
  // During sweeping we refer to the current Event, as it already holds the
  // correct marking information. In all other phases, the previous event
  // holds the most up-to-date marking information.
  const Event& event =
      gc_state_ == GarbageCollectionState::kSweeping ? current_ : previous_;
  return event.scope_data[kAtomicMark] + event.scope_data[kIncrementalMark] +
         v8::base::TimeDelta::FromMicroseconds(v8::base::Relaxed_Load(
             &event.concurrent_scope_data[kConcurrentMark]));
}

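// Tracks raw memory provided to the heap (presumably page allocations), as
// opposed to the object-size accounting done in NotifyAllocation() and
// NotifyExplicitFree().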
void StatsCollector::NotifyAllocatedMemory(int64_t size) {
  memory_allocated_bytes_ += size;
#ifdef DEBUG
  const auto saved_epoch = current_.epoch;
#endif  // DEBUG
  ForAllAllocationObservers([size](AllocationObserver* observer) {
    observer->AllocatedSizeIncreased(static_cast<size_t>(size));
  });
#ifdef DEBUG
  // AllocatedSizeIncreased() must not trigger GC.
  DCHECK_EQ(saved_epoch, current_.epoch);
#endif  // DEBUG
}

void StatsCollector::NotifyFreedMemory(int64_t size) {
  memory_freed_bytes_since_end_of_marking_ += size;
#ifdef DEBUG
  const auto saved_epoch = current_.epoch;
#endif  // DEBUG
  ForAllAllocationObservers([size](AllocationObserver* observer) {
    observer->AllocatedSizeDecreased(static_cast<size_t>(size));
  });
#ifdef DEBUG
  // AllocatedSizeDecreased() must not trigger GC.
  DCHECK_EQ(saved_epoch, current_.epoch);
#endif  // DEBUG
}

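// The discarded-bytes counter is updated with relaxed atomics, presumably
// because it can be modified concurrently (e.g. by sweeping); only the
// eventual value is observed via discarded_memory_size().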
void StatsCollector::IncrementDiscardedMemory(size_t value) {
  const size_t old =
      discarded_bytes_.fetch_add(value, std::memory_order_relaxed);
  DCHECK_GE(old + value, old);
  USE(old);
}

void StatsCollector::DecrementDiscardedMemory(size_t value) {
  const size_t old =
      discarded_bytes_.fetch_sub(value, std::memory_order_relaxed);
  DCHECK_GE(old, old - value);
  USE(old);
}

void StatsCollector::ResetDiscardedMemory() {
  discarded_bytes_.store(0, std::memory_order_relaxed);
}

size_t StatsCollector::discarded_memory_size() const {
  return discarded_bytes_.load(std::memory_order_relaxed);
}

size_t StatsCollector::resident_memory_size() const {
  const auto allocated = allocated_memory_size();
  const auto discarded = discarded_memory_size();
  DCHECK_IMPLIES(allocated == 0, discarded == 0);
  DCHECK_IMPLIES(allocated > 0, allocated > discarded);
  return allocated - discarded;
}

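// Reports individual incremental mark/sweep slice durations to the metric
// recorder; all other scopes are ignored here.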
void StatsCollector::RecordHistogramSample(ScopeId scope_id_,
                                           v8::base::TimeDelta time) {
  switch (scope_id_) {
    case kIncrementalMark: {
      MetricRecorder::MainThreadIncrementalMark event{time.InMicroseconds()};
      metric_recorder_->AddMainThreadEvent(event);
      break;
    }
    case kIncrementalSweep: {
      MetricRecorder::MainThreadIncrementalSweep event{time.InMicroseconds()};
      metric_recorder_->AddMainThreadEvent(event);
      break;
    }
    default:
      break;
  }
}

}  // namespace internal
}  // namespace cppgc