// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/trace_event/malloc_dump_provider.h"

#include <stddef.h>

#include <unordered_map>

#include "base/allocator/buildflags.h"
#include "base/debug/profiler.h"
#include "base/format_macros.h"
#include "base/metrics/histogram_functions.h"
#include "base/numerics/safe_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#include "partition_alloc/buildflags.h"

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
#include "partition_alloc/partition_alloc_config.h"  // nogncheck
#include "partition_alloc/partition_bucket_lookup.h"  // nogncheck
#endif

#if BUILDFLAG(IS_APPLE)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
#if BUILDFLAG(IS_WIN)
#include <windows.h>
#endif

#if BUILDFLAG(IS_LINUX) || BUILDFLAG(IS_CHROMEOS) || BUILDFLAG(IS_ANDROID)
#include <features.h>
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
#include "base/no_destructor.h"
#include "partition_alloc/shim/allocator_shim_default_dispatch_to_partition_alloc.h"
#endif

#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
#include "partition_alloc/partition_alloc_constants.h"  // nogncheck
#endif

namespace base {
namespace trace_event {

namespace {
#if BUILDFLAG(IS_WIN)
// A structure containing some information about a given heap.
struct WinHeapInfo {
  size_t committed_size;
  size_t uncommitted_size;
  size_t allocated_size;
  size_t block_count;
};

// NOTE: crbug.com/665516
// Unfortunately, there is no safe way to collect information from secondary
// heaps due to limitations and racy nature of this piece of WinAPI.
void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
  // Iterate through whichever heap our CRT is using.
  HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
  ::HeapLock(crt_heap);
  PROCESS_HEAP_ENTRY heap_entry;
  heap_entry.lpData = nullptr;
  // Walk over all the entries in the main heap.
  while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
    if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
      crt_heap_info->allocated_size += heap_entry.cbData;
      crt_heap_info->block_count++;
    } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
      crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
      crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
    }
  }
  CHECK(::HeapUnlock(crt_heap) == TRUE);
}

void ReportWinHeapStats(MemoryDumpLevelOfDetail level_of_detail,
                        ProcessMemoryDump* pmd,
                        size_t* total_virtual_size,
                        size_t* resident_size,
                        size_t* allocated_objects_size,
                        size_t* allocated_objects_count) {
  // This is too expensive on Windows, crbug.com/780735.
  if (level_of_detail == MemoryDumpLevelOfDetail::kDetailed) {
    WinHeapInfo main_heap_info = {};
    WinHeapMemoryDumpImpl(&main_heap_info);
    *total_virtual_size +=
        main_heap_info.committed_size + main_heap_info.uncommitted_size;
    // Resident size is approximated with committed heap size. Note that it is
    // possible to do this with better accuracy on Windows by intersecting the
    // working set with the virtual memory ranges occupied by the heap. It's
    // not clear that this is worth it, as it's fairly expensive to do.
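    // (Sketch only, not implemented here: such an approach would enumerate
    // the heap's committed regions and query per-page residency, e.g. via
    // ::QueryWorkingSetEx(), counting only the pages that are actually in the
    // working set.)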
    *resident_size += main_heap_info.committed_size;
    *allocated_objects_size += main_heap_info.allocated_size;
    *allocated_objects_count += main_heap_info.block_count;

    if (pmd) {
      MemoryAllocatorDump* win_heap_dump =
          pmd->CreateAllocatorDump("malloc/win_heap");
      win_heap_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                               MemoryAllocatorDump::kUnitsBytes,
                               main_heap_info.allocated_size);
    }
  }
}
#endif  // BUILDFLAG(IS_WIN)

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
void ReportPartitionAllocStats(ProcessMemoryDump* pmd,
                               MemoryDumpLevelOfDetail level_of_detail,
                               size_t* total_virtual_size,
                               size_t* resident_size,
                               size_t* allocated_objects_size,
                               size_t* allocated_objects_count,
                               uint64_t* syscall_count,
                               size_t* cumulative_brp_quarantined_size,
                               size_t* cumulative_brp_quarantined_count) {
  MemoryDumpPartitionStatsDumper partition_stats_dumper("malloc", pmd,
                                                        level_of_detail);
  bool is_light_dump = level_of_detail == MemoryDumpLevelOfDetail::kBackground;

  auto* allocator = allocator_shim::internal::PartitionAllocMalloc::Allocator();
  allocator->DumpStats("allocator", is_light_dump, &partition_stats_dumper);

  auto* original_allocator =
      allocator_shim::internal::PartitionAllocMalloc::OriginalAllocator();
  if (original_allocator) {
    original_allocator->DumpStats("original", is_light_dump,
                                  &partition_stats_dumper);
  }

  *total_virtual_size += partition_stats_dumper.total_resident_bytes();
  *resident_size += partition_stats_dumper.total_resident_bytes();
  *allocated_objects_size += partition_stats_dumper.total_active_bytes();
  *allocated_objects_count += partition_stats_dumper.total_active_count();
  *syscall_count += partition_stats_dumper.syscall_count();
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  *cumulative_brp_quarantined_size +=
      partition_stats_dumper.cumulative_brp_quarantined_bytes();
  *cumulative_brp_quarantined_count +=
      partition_stats_dumper.cumulative_brp_quarantined_count();
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
}
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

#if !PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_APPLE)
void ReportAppleAllocStats(size_t* total_virtual_size,
                           size_t* resident_size,
                           size_t* allocated_objects_size) {
  malloc_statistics_t stats = {0};
  malloc_zone_statistics(nullptr, &stats);
  *total_virtual_size += stats.size_allocated;
  *allocated_objects_size += stats.size_in_use;

  // Resident size is approximated pretty well by stats.max_size_in_use.
  // However, on macOS, freed blocks are both resident and reusable, which is
  // semantically equivalent to deallocated. The implementation of libmalloc
  // will also only hold a fixed number of freed regions before actually
  // starting to deallocate them, so stats.max_size_in_use is also not
  // representative of the peak size. As a result, stats.max_size_in_use is
  // typically somewhere between actually resident [non-reusable] pages, and
  // peak size. This is not very useful, so we just use stats.size_in_use for
  // resident_size, even though it's an underestimate and fails to account for
  // fragmentation. See
  // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
  *resident_size += stats.size_in_use;
}
#endif

#if (PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && BUILDFLAG(IS_ANDROID)) || \
    (!PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC) && !BUILDFLAG(IS_WIN) &&    \
     !BUILDFLAG(IS_APPLE) && !BUILDFLAG(IS_FUCHSIA))
void ReportMallinfoStats(ProcessMemoryDump* pmd,
                         size_t* total_virtual_size,
                         size_t* resident_size,
                         size_t* allocated_objects_size,
                         size_t* allocated_objects_count) {
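  // Use mallinfo2() where available (glibc 2.33+); unlike the legacy
  // mallinfo(), its fields are size_t rather than int, so they do not wrap
  // once the heap exceeds 2 GiB.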
#if defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 33)
#define MALLINFO2_FOUND_IN_LIBC
  struct mallinfo2 info = mallinfo2();
#endif
#endif  // defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if !defined(MALLINFO2_FOUND_IN_LIBC)
  struct mallinfo info = mallinfo();
#endif
#undef MALLINFO2_FOUND_IN_LIBC
  // With Android's jemalloc, |arena| is 0 and the outer pages size is reported
  // by |hblkhd|. With dlmalloc the total is given by |arena| + |hblkhd|.
  // For more details see: http://goo.gl/fMR8lF.
  *total_virtual_size += checked_cast<size_t>(info.arena + info.hblkhd);
  size_t total_allocated_size = checked_cast<size_t>(info.uordblks);
  *resident_size += total_allocated_size;

  // Total allocated space is given by |uordblks|.
  *allocated_objects_size += total_allocated_size;

  if (pmd) {
    MemoryAllocatorDump* sys_alloc_dump =
        pmd->CreateAllocatorDump("malloc/sys_malloc");
    sys_alloc_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                              MemoryAllocatorDump::kUnitsBytes,
                              total_allocated_size);
  }
}
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
void ReportPartitionAllocThreadCacheStats(
    ProcessMemoryDump* pmd,
    MemoryAllocatorDump* dump,
    const partition_alloc::ThreadCacheStats& stats,
    const std::string& metrics_suffix,
    bool detailed) {
  dump->AddScalar("alloc_count", MemoryAllocatorDump::kTypeScalar,
                  stats.alloc_count);
  dump->AddScalar("alloc_hits", MemoryAllocatorDump::kTypeScalar,
                  stats.alloc_hits);
  dump->AddScalar("alloc_misses", MemoryAllocatorDump::kTypeScalar,
                  stats.alloc_misses);

  dump->AddScalar("alloc_miss_empty", MemoryAllocatorDump::kTypeScalar,
                  stats.alloc_miss_empty);
  dump->AddScalar("alloc_miss_too_large", MemoryAllocatorDump::kTypeScalar,
                  stats.alloc_miss_too_large);

  dump->AddScalar("cache_fill_count", MemoryAllocatorDump::kTypeScalar,
                  stats.cache_fill_count);
  dump->AddScalar("cache_fill_hits", MemoryAllocatorDump::kTypeScalar,
                  stats.cache_fill_hits);
  dump->AddScalar("cache_fill_misses", MemoryAllocatorDump::kTypeScalar,
                  stats.cache_fill_misses);

  dump->AddScalar("batch_fill_count", MemoryAllocatorDump::kTypeScalar,
                  stats.batch_fill_count);

  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, stats.bucket_total_memory);
  dump->AddScalar("metadata_overhead", MemoryAllocatorDump::kUnitsBytes,
                  stats.metadata_overhead);

#if PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
  if (stats.alloc_count && detailed) {
    partition_alloc::internal::BucketIndexLookup lookup{};
    std::string name = dump->absolute_name();
    for (size_t i = 0; i < partition_alloc::kNumBuckets; i++) {
      size_t bucket_size = lookup.bucket_sizes()[i];
      if (bucket_size == partition_alloc::kInvalidBucketSize) {
        continue;
      }
      // Covers all normal buckets, that is up to ~1MiB, so 7 digits.
      std::string dump_name = base::StringPrintf(
          "%s/buckets_alloc/%07d", name.c_str(), static_cast<int>(bucket_size));
      auto* buckets_alloc_dump = pmd->CreateAllocatorDump(dump_name);
      buckets_alloc_dump->AddScalar("count", MemoryAllocatorDump::kUnitsObjects,
                                    stats.allocs_per_bucket_[i]);
    }
  }
#endif  // PA_CONFIG(THREAD_CACHE_ALLOC_STATS)
}

void ReportPartitionAllocLightweightQuarantineStats(
    MemoryAllocatorDump* dump,
    const partition_alloc::LightweightQuarantineStats& stats) {
  dump->AddScalar("count", MemoryAllocatorDump::kUnitsObjects, stats.count);
  dump->AddScalar("size_in_bytes", MemoryAllocatorDump::kUnitsBytes,
                  stats.size_in_bytes);
  dump->AddScalar("cumulative_count", MemoryAllocatorDump::kUnitsObjects,
                  stats.cumulative_count);
  dump->AddScalar("cumulative_size_in_bytes", MemoryAllocatorDump::kUnitsBytes,
                  stats.cumulative_size_in_bytes);
  dump->AddScalar("quarantine_miss_count", MemoryAllocatorDump::kUnitsObjects,
                  stats.quarantine_miss_count);
}
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC)

}  // namespace

// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";

// static
MallocDumpProvider* MallocDumpProvider::GetInstance() {
  return Singleton<MallocDumpProvider,
                   LeakySingletonTraits<MallocDumpProvider>>::get();
}

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
// static
void MallocDumpProvider::SetExtremeLUDGetStatsCallback(
    ExtremeLUDGetStatsCallback callback) {
  DCHECK(!callback.is_null());
  auto& extreme_lud_get_stats_callback = GetExtremeLUDGetStatsCallback();
  DCHECK(extreme_lud_get_stats_callback.is_null());
  extreme_lud_get_stats_callback = std::move(callback);
}

// static
MallocDumpProvider::ExtremeLUDGetStatsCallback&
MallocDumpProvider::GetExtremeLUDGetStatsCallback() {
  static NoDestructor<MallocDumpProvider::ExtremeLUDGetStatsCallback>
      extreme_lud_get_stats_callback;
  return *extreme_lud_get_stats_callback;
}
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

MallocDumpProvider::MallocDumpProvider() = default;
MallocDumpProvider::~MallocDumpProvider() = default;

// Called at trace dump point time. Creates a snapshot of the memory counters
// for the current process.
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                      ProcessMemoryDump* pmd) {
  {
    base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
    if (!emit_metrics_on_memory_dump_) {
      return true;
    }
  }

  size_t total_virtual_size = 0;
  size_t resident_size = 0;
  size_t allocated_objects_size = 0;
  size_t allocated_objects_count = 0;
  uint64_t syscall_count = 0;
  size_t cumulative_brp_quarantined_size = 0;
  size_t cumulative_brp_quarantined_count = 0;
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  uint64_t pa_only_resident_size;
  uint64_t pa_only_allocated_objects_size;
#endif

#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  ReportPartitionAllocStats(
      pmd, args.level_of_detail, &total_virtual_size, &resident_size,
      &allocated_objects_size, &allocated_objects_count, &syscall_count,
      &cumulative_brp_quarantined_size, &cumulative_brp_quarantined_count);

  pa_only_resident_size = resident_size;
  pa_only_allocated_objects_size = allocated_objects_size;

  // Even when PartitionAlloc is used, WinHeap / system malloc is still used as
  // well; report its statistics.
#if BUILDFLAG(IS_ANDROID)
  ReportMallinfoStats(pmd, &total_virtual_size, &resident_size,
                      &allocated_objects_size, &allocated_objects_count);
#elif BUILDFLAG(IS_WIN)
  ReportWinHeapStats(args.level_of_detail, pmd, &total_virtual_size,
                     &resident_size, &allocated_objects_size,
                     &allocated_objects_count);
#endif  // BUILDFLAG(IS_ANDROID), BUILDFLAG(IS_WIN)

#elif BUILDFLAG(IS_APPLE)
  ReportAppleAllocStats(&total_virtual_size, &resident_size,
                        &allocated_objects_size);
#elif BUILDFLAG(IS_WIN)
  ReportWinHeapStats(args.level_of_detail, nullptr, &total_virtual_size,
                     &resident_size, &allocated_objects_size,
                     &allocated_objects_count);
#elif BUILDFLAG(IS_FUCHSIA)
  // TODO(fuchsia): Port, see https://crbug.com/706592.
#else
  ReportMallinfoStats(/*pmd=*/nullptr, &total_virtual_size, &resident_size,
                      &allocated_objects_size, &allocated_objects_count);
#endif

  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
  outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
                        total_virtual_size);
  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes, resident_size);

  MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes,
                        allocated_objects_size);
  if (allocated_objects_count != 0) {
    inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                          MemoryAllocatorDump::kUnitsObjects,
                          allocated_objects_count);
  }

  int64_t waste = static_cast<int64_t>(resident_size - allocated_objects_size);

  // With PartitionAlloc, the size reported under malloc/partitions is the
  // resident size, so it already includes fragmentation, meaning that
  // "malloc/"'s size would double-count fragmentation if we reported it under
  // "malloc/metadata_fragmentation_caches" as well.
  //
  // Still report waste, as on some platforms, PartitionAlloc doesn't capture
  // all of malloc()'s memory footprint.
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  int64_t pa_waste = static_cast<int64_t>(pa_only_resident_size -
                                          pa_only_allocated_objects_size);
  waste -= pa_waste;
#endif

  if (waste > 0) {
    // Explicitly specify why the extra memory is resident. On Mac and iOS it
    // accounts for fragmentation and metadata.
    MemoryAllocatorDump* other_dump =
        pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
    other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                          MemoryAllocatorDump::kUnitsBytes,
                          static_cast<uint64_t>(waste));
  }

  base::trace_event::MemoryAllocatorDump* partitions_dump = nullptr;
  base::trace_event::MemoryAllocatorDump* elud_dump_for_small_objects = nullptr;
  ExtremeLUDStats elud_stats_for_small_objects;
  base::trace_event::MemoryAllocatorDump* elud_dump_for_large_objects = nullptr;
  ExtremeLUDStats elud_stats_for_large_objects;
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  partitions_dump = pmd->CreateAllocatorDump("malloc/partitions");
  pmd->AddOwnershipEdge(inner_dump->guid(), partitions_dump->guid());

  auto& extreme_lud_get_stats_callback = GetExtremeLUDGetStatsCallback();
  if (!extreme_lud_get_stats_callback.is_null()) {
    // The Extreme LUD is enabled.
    elud_dump_for_small_objects =
        pmd->CreateAllocatorDump("malloc/extreme_lud/small_objects");
    elud_dump_for_large_objects =
        pmd->CreateAllocatorDump("malloc/extreme_lud/large_objects");
    const auto elud_stats_set = extreme_lud_get_stats_callback.Run();
    elud_stats_for_small_objects = elud_stats_set.for_small_objects;
    elud_stats_for_large_objects = elud_stats_set.for_large_objects;
    ReportPartitionAllocLightweightQuarantineStats(
        elud_dump_for_small_objects, elud_stats_for_small_objects.lq_stats);
    ReportPartitionAllocLightweightQuarantineStats(
        elud_dump_for_large_objects, elud_stats_for_large_objects.lq_stats);
  }
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)

  ReportPerMinuteStats(
      syscall_count, cumulative_brp_quarantined_size,
      cumulative_brp_quarantined_count, elud_stats_for_small_objects,
      elud_stats_for_large_objects, outer_dump, partitions_dump,
      elud_dump_for_small_objects, elud_dump_for_large_objects);

  return true;
}

void MallocDumpProvider::ReportPerMinuteStats(
    uint64_t syscall_count,
    size_t cumulative_brp_quarantined_bytes,
    size_t cumulative_brp_quarantined_count,
    const ExtremeLUDStats& elud_stats_for_small_objects,
    const ExtremeLUDStats& elud_stats_for_large_objects,
    MemoryAllocatorDump* malloc_dump,
    MemoryAllocatorDump* partition_alloc_dump,
    MemoryAllocatorDump* elud_dump_for_small_objects,
    MemoryAllocatorDump* elud_dump_for_large_objects) {
#if PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
  uint64_t new_syscalls = syscall_count - last_syscall_count_;
  size_t new_brp_quarantined_bytes =
      cumulative_brp_quarantined_bytes - last_cumulative_brp_quarantined_bytes_;
  size_t new_brp_quarantined_count =
      cumulative_brp_quarantined_count - last_cumulative_brp_quarantined_count_;
  base::TimeDelta time_since_last_dump =
      base::TimeTicks::Now() - last_memory_dump_time_;
  auto seconds_since_last_dump = time_since_last_dump.InSecondsF();
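  // The per-minute rates below are extrapolated from the interval since the
  // last dump; e.g. 300 new syscalls over a 30 second interval report as
  // 600 syscalls_per_minute.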
  uint64_t syscalls_per_minute =
      static_cast<uint64_t>((60 * new_syscalls) / seconds_since_last_dump);
  malloc_dump->AddScalar("syscalls_per_minute", "count", syscalls_per_minute);
  if (partition_alloc_dump) {
    size_t brp_quarantined_bytes_per_minute =
        (60 * new_brp_quarantined_bytes) / seconds_since_last_dump;
    size_t brp_quarantined_count_per_minute =
        (60 * new_brp_quarantined_count) / seconds_since_last_dump;
    partition_alloc_dump->AddScalar("brp_quarantined_bytes_per_minute",
                                    MemoryAllocatorDump::kUnitsBytes,
                                    brp_quarantined_bytes_per_minute);
    partition_alloc_dump->AddScalar("brp_quarantined_count_per_minute",
                                    MemoryAllocatorDump::kNameObjectCount,
                                    brp_quarantined_count_per_minute);
  }

  auto report_elud_per_minute_stats = [time_since_last_dump,
                                       seconds_since_last_dump](
                                          const ExtremeLUDStats& elud_stats,
                                          CumulativeEludStats&
                                              last_cumulative_elud_stats,
                                          MemoryAllocatorDump* elud_dump) {
    size_t bytes = elud_stats.lq_stats.cumulative_size_in_bytes -
                   last_cumulative_elud_stats.quarantined_bytes;
    size_t count = elud_stats.lq_stats.cumulative_count -
                   last_cumulative_elud_stats.quarantined_count;
    size_t miss_count = elud_stats.lq_stats.quarantine_miss_count -
                        last_cumulative_elud_stats.miss_count;
    elud_dump->AddScalar("bytes_per_minute", MemoryAllocatorDump::kUnitsBytes,
                         60ull * bytes / seconds_since_last_dump);
    elud_dump->AddScalar("count_per_minute",
                         MemoryAllocatorDump::kNameObjectCount,
                         60ull * count / seconds_since_last_dump);
    elud_dump->AddScalar("miss_count_per_minute",
                         MemoryAllocatorDump::kNameObjectCount,
                         60ull * miss_count / seconds_since_last_dump);
    // Given the following three:
    //   capacity := the quarantine storage space
    //   time     := the elapsed time since the last dump
    //   bytes    := the consumed/used bytes since the last dump
    // We can define/calculate the following:
    //   speed := the consuming speed of the quarantine
    //          = bytes / time
    //   quarantined_time := the time to use up the capacity
    //                       (close to how long an object may be quarantined)
    //                     = capacity / speed
    //                     = capacity / (bytes / time)
    //                     = time * capacity / bytes
    //
    // Note that objects in the quarantine are randomly evicted, so objects may
    // stay in the quarantine longer or shorter depending on object sizes,
    // allocation/deallocation patterns, etc., in addition to pure randomness.
    // So this is just a rough estimate, not necessarily the average.
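    // Illustrative numbers only: with capacity_in_bytes = 256 KiB and 1 MiB
    // quarantined since a dump 60 seconds ago, quarantined_time
    // ~= 60 s * 256 KiB / 1 MiB = 15 s (reported as 15000 msec).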
    if (bytes > 0) {
      elud_dump->AddScalar(
          "quarantined_time", "msec",
          static_cast<uint64_t>(time_since_last_dump.InMilliseconds()) *
              elud_stats.capacity_in_bytes / bytes);
    }
    last_cumulative_elud_stats.quarantined_bytes =
        elud_stats.lq_stats.cumulative_size_in_bytes;
    last_cumulative_elud_stats.quarantined_count =
        elud_stats.lq_stats.cumulative_count;
    last_cumulative_elud_stats.miss_count =
        elud_stats.lq_stats.quarantine_miss_count;
  };
  if (elud_dump_for_small_objects) {
    report_elud_per_minute_stats(elud_stats_for_small_objects,
                                 last_cumulative_elud_stats_for_small_objects_,
                                 elud_dump_for_small_objects);
  }
  if (elud_dump_for_large_objects) {
    report_elud_per_minute_stats(elud_stats_for_large_objects,
                                 last_cumulative_elud_stats_for_large_objects_,
                                 elud_dump_for_large_objects);
  }

  last_memory_dump_time_ = base::TimeTicks::Now();
  last_syscall_count_ = syscall_count;
  last_cumulative_brp_quarantined_bytes_ = cumulative_brp_quarantined_bytes;
  last_cumulative_brp_quarantined_count_ = cumulative_brp_quarantined_count;
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC_AS_MALLOC)
}

#if PA_BUILDFLAG(USE_PARTITION_ALLOC)
std::string GetPartitionDumpName(const char* root_name,
                                 const char* partition_name) {
  return base::StringPrintf("%s/%s/%s", root_name,
                            MemoryDumpPartitionStatsDumper::kPartitionsDumpName,
                            partition_name);
}

MemoryDumpPartitionStatsDumper::MemoryDumpPartitionStatsDumper(
    const char* root_name,
    ProcessMemoryDump* memory_dump,
    MemoryDumpLevelOfDetail level_of_detail)
    : root_name_(root_name),
      memory_dump_(memory_dump),
      detailed_(level_of_detail != MemoryDumpLevelOfDetail::kBackground) {}

void MemoryDumpPartitionStatsDumper::PartitionDumpTotals(
    const char* partition_name,
    const partition_alloc::PartitionMemoryStats* memory_stats) {
  total_mmapped_bytes_ += memory_stats->total_mmapped_bytes;
  total_resident_bytes_ += memory_stats->total_resident_bytes;
  total_active_bytes_ += memory_stats->total_active_bytes;
  total_active_count_ += memory_stats->total_active_count;
  syscall_count_ += memory_stats->syscall_count;
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  cumulative_brp_quarantined_bytes_ +=
      memory_stats->cumulative_brp_quarantined_bytes;
  cumulative_brp_quarantined_count_ +=
      memory_stats->cumulative_brp_quarantined_count;
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)

  std::string dump_name = GetPartitionDumpName(root_name_, partition_name);
  MemoryAllocatorDump* allocator_dump =
      memory_dump_->CreateAllocatorDump(dump_name);

  auto total_committed_bytes = memory_stats->total_committed_bytes;
  auto total_active_bytes = memory_stats->total_active_bytes;
  size_t wasted = total_committed_bytes - total_active_bytes;
  DCHECK_GE(total_committed_bytes, total_active_bytes);
  size_t fragmentation =
      total_committed_bytes == 0 ? 0 : 100 * wasted / total_committed_bytes;
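  // For example, 10 MiB committed with 8 MiB active gives wasted = 2 MiB and
  // fragmentation = 20 (percent).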

  allocator_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_resident_bytes);
  allocator_dump->AddScalar("allocated_objects_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_active_bytes);
  allocator_dump->AddScalar("allocated_objects_count", "count",
                            memory_stats->total_active_count);
  allocator_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_mmapped_bytes);
  allocator_dump->AddScalar("virtual_committed_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_committed_bytes);
  allocator_dump->AddScalar("max_committed_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->max_committed_bytes);
  allocator_dump->AddScalar("allocated_size", MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_allocated_bytes);
  allocator_dump->AddScalar("max_allocated_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->max_allocated_bytes);
  allocator_dump->AddScalar("decommittable_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_decommittable_bytes);
  allocator_dump->AddScalar("discardable_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_discardable_bytes);
#if PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  allocator_dump->AddScalar("brp_quarantined_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->total_brp_quarantined_bytes);
  allocator_dump->AddScalar("brp_quarantined_count", "count",
                            memory_stats->total_brp_quarantined_count);
#endif  // PA_BUILDFLAG(ENABLE_BACKUP_REF_PTR_SUPPORT)
  allocator_dump->AddScalar("syscall_count", "count",
                            memory_stats->syscall_count);
  allocator_dump->AddScalar("syscall_total_time_ms", "ms",
                            memory_stats->syscall_total_time_ns / 1e6);
  allocator_dump->AddScalar("fragmentation", "percent", fragmentation);
  allocator_dump->AddScalar("wasted", MemoryAllocatorDump::kUnitsBytes, wasted);

  if (memory_stats->has_thread_cache) {
    const auto& thread_cache_stats = memory_stats->current_thread_cache_stats;
    auto* thread_cache_dump = memory_dump_->CreateAllocatorDump(
        dump_name + "/thread_cache/main_thread");
    ReportPartitionAllocThreadCacheStats(memory_dump_, thread_cache_dump,
                                         thread_cache_stats, ".MainThread",
                                         detailed_);

    const auto& all_thread_caches_stats = memory_stats->all_thread_caches_stats;
    auto* all_thread_caches_dump =
        memory_dump_->CreateAllocatorDump(dump_name + "/thread_cache");
    ReportPartitionAllocThreadCacheStats(memory_dump_, all_thread_caches_dump,
                                         all_thread_caches_stats, "",
                                         detailed_);
  }

  if (memory_stats->has_scheduler_loop_quarantine) {
    MemoryAllocatorDump* quarantine_dump_total =
        memory_dump_->CreateAllocatorDump(dump_name +
                                          "/scheduler_loop_quarantine");
    ReportPartitionAllocLightweightQuarantineStats(
        quarantine_dump_total,
        memory_stats->scheduler_loop_quarantine_stats_total);
  }
}

void MemoryDumpPartitionStatsDumper::PartitionsDumpBucketStats(
    const char* partition_name,
    const partition_alloc::PartitionBucketMemoryStats* memory_stats) {
  DCHECK(memory_stats->is_valid);
  std::string dump_name = GetPartitionDumpName(root_name_, partition_name);
  if (memory_stats->is_direct_map) {
    dump_name.append(base::StringPrintf("/buckets/directMap_%" PRIu64, ++uid_));
  } else {
    // Normal buckets go up to ~1MiB, 7 digits.
    dump_name.append(base::StringPrintf("/buckets/bucket_%07" PRIu32,
                                        memory_stats->bucket_slot_size));
  }

  MemoryAllocatorDump* allocator_dump =
      memory_dump_->CreateAllocatorDump(dump_name);
  allocator_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->resident_bytes);
  allocator_dump->AddScalar("allocated_objects_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->active_bytes);
  allocator_dump->AddScalar("slot_size", MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->bucket_slot_size);
  allocator_dump->AddScalar("decommittable_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->decommittable_bytes);
  allocator_dump->AddScalar("discardable_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->discardable_bytes);
  // TODO(bartekn): Rename the scalar names.
  allocator_dump->AddScalar("total_slot_span_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            memory_stats->allocated_slot_span_size);
  allocator_dump->AddScalar("active_slot_spans",
                            MemoryAllocatorDump::kUnitsObjects,
                            memory_stats->num_active_slot_spans);
  allocator_dump->AddScalar("full_slot_spans",
                            MemoryAllocatorDump::kUnitsObjects,
                            memory_stats->num_full_slot_spans);
  allocator_dump->AddScalar("empty_slot_spans",
                            MemoryAllocatorDump::kUnitsObjects,
                            memory_stats->num_empty_slot_spans);
  allocator_dump->AddScalar("decommitted_slot_spans",
                            MemoryAllocatorDump::kUnitsObjects,
                            memory_stats->num_decommitted_slot_spans);
}
#endif  // PA_BUILDFLAG(USE_PARTITION_ALLOC)

}  // namespace trace_event
}  // namespace base