// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <memory>
#include <vector>

#include "base/bits.h"
#include "base/logging.h"
#include "base/memory/page_size.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "third_party/perfetto/protos/perfetto/trace/memory_graph.pbzero.h"
#include "third_party/perfetto/protos/perfetto/trace/trace_packet.pbzero.h"

#if BUILDFLAG(IS_IOS)
#include <mach/vm_page_size.h>
#endif

#if BUILDFLAG(IS_POSIX)
#include <sys/mman.h>
#endif

#if BUILDFLAG(IS_WIN)
#include <windows.h>  // Must be in front of other Windows header files

#include <Psapi.h>
#endif

#if BUILDFLAG(IS_FUCHSIA)
#include <tuple>

#include "base/notreached.h"
#endif

using ProcessSnapshot =
    ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot;

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

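// Returns a token that is created once per process and reused for every dump,
// so that dump ids derived from it stay stable for the lifetime of the
// process.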
UnguessableToken GetTokenForCurrentProcess() {
  static UnguessableToken instance = UnguessableToken::Create();
  return instance;
}

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if BUILDFLAG(IS_IOS)
  // On iOS, getpagesize() returns the user page size, but the kernel page
  // size is needed when sizing arrays for mincore(). Use vm_kernel_page_size
  // as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
  // Refer to http://crbug.com/542671 and Apple rdar://23651782
  return vm_kernel_page_size;
#else
  return base::GetPageSize();
#endif  // BUILDFLAG(IS_IOS)
}

// static
absl::optional<size_t> ProcessMemoryDump::CountResidentBytes(
    void* start_address,
    size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_pages = 0;
  bool failure = false;

  // An array as large as the number of pages in the memory segment needs to
  // be passed to the query function. To avoid allocating a large array, the
  // given block of memory is split into chunks of size |kMaxChunkSize|.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
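  // The type of the per-page query buffer differs by platform:
  // PSAPI_WORKING_SET_EX_INFORMATION entries on Windows, one byte per page
  // for mincore() elsewhere.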
#if BUILDFLAG(IS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif BUILDFLAG(IS_APPLE)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;
#if BUILDFLAG(IS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif BUILDFLAG(IS_FUCHSIA)
    // TODO(crbug.com/851760): Implement counting resident bytes.
    // For now, log and avoid unused variable warnings.
    NOTIMPLEMENTED_LOG_ONCE();
    std::ignore = chunk_start;
    std::ignore = page_count;
#elif BUILDFLAG(IS_APPLE)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif BUILDFLAG(IS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
    do {
      result =
#if BUILDFLAG(IS_AIX)
          mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
                  reinterpret_cast<char*>(vec.get()));
#else
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
#endif
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

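    // mincore() sets the least significant bit of each vector entry when the
    // corresponding page is resident.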
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

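    // Note: despite its name, |total_resident_pages| accumulates bytes, which
    // is what this function ultimately returns.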
    total_resident_pages += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
    return absl::nullopt;
  }
  return total_resident_pages;
}

// static
absl::optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
    void* start_address,
    size_t mapped_size) {
  // `MapAt()` performs some internal arithmetic to allow non-page-aligned
  // offsets, but the memory accounting still expects to work with page-aligned
  // allocations.
  //
  // TODO(dcheng): one peculiarity here is that the shmem implementation uses
  // `base::SysInfo::VMAllocationGranularity()` while this file uses
  // `GetSystemPageSize()`. It'd be nice not to have two names for the same
  // thing...
  uint8_t* aligned_start_address = base::bits::AlignDown(
      static_cast<uint8_t*>(start_address), GetSystemPageSize());
  size_t adjusted_size =
      mapped_size + static_cast<size_t>(static_cast<uint8_t*>(start_address) -
                                        aligned_start_address);

#if BUILDFLAG(IS_MAC)
  // On macOS, use mach_vm_region instead of mincore for performance
  // (crbug.com/742042).
  mach_vm_size_t dummy_size = 0;
  mach_vm_address_t address =
      reinterpret_cast<mach_vm_address_t>(aligned_start_address);
  vm_region_top_info_data_t info;
  MachVMRegionResult result =
      GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
  if (result == MachVMRegionResult::Error) {
    LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
                  "is invalid";
    return absl::optional<size_t>();
  }

  size_t resident_pages =
      info.private_pages_resident + info.shared_pages_resident;

  // On macOS, measurements for private memory footprint overcount by
  // faulted pages in anonymous shared memory. To discount for this, we touch
  // all the resident pages in anonymous shared memory here, thus making them
  // faulted as well. This relies on two assumptions:
  //
  // 1) Consumers use shared memory from front to back. Thus, if there are
  // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes in
  // the shared memory region.
  //
  // 2) This logic is run shortly before the logic that calculates
  // phys_footprint, thus ensuring that the discrepancy between faulted and
  // resident pages is minimal.
  //
  // The performance penalty is expected to be small.
  //
  // * Most of the time, we expect the pages to already be resident and
  // faulted, thus incurring a cache penalty read hit [since we read from each
  // resident page].
  //
  // * Rarely, we expect the pages to be resident but not faulted, resulting in
  // soft faults + cache penalty.
  //
  // * If assumption (1) is invalid, this will potentially fault some
  // previously non-resident pages, thus increasing memory usage, without
  // fixing the accounting.
  //
  // Sanity check in case the mapped size is less than the total size of the
  // region.
  size_t pages_to_fault =
      std::min(resident_pages, (adjusted_size + PAGE_SIZE - 1) / PAGE_SIZE);

  volatile uint8_t* base_address = const_cast<uint8_t*>(aligned_start_address);
  for (size_t i = 0; i < pages_to_fault; ++i) {
    // Reading from a volatile is a visible side-effect for the purposes of
    // optimization. This guarantees that the optimizer will not kill this
    // line.
    base_address[i * PAGE_SIZE];
  }

  return resident_pages * PAGE_SIZE;
#else
  return CountResidentBytes(aligned_start_address, adjusted_size);
#endif  // BUILDFLAG(IS_MAC)
}

#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(
    const MemoryDumpArgs& dump_args)
    : process_token_(GetTokenForCurrentProcess()),
      dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
    default;

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, guid));
}

MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black hole dump if an invalid dump name is
  // given.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
      !IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
    return GetBlackHoleMad(mad->absolute_name());
  }

  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second) << "Duplicate name: "
                                  << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // A shared allocator dump can be shared within a process and the guid could
  // have been created already.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get()) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

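// Returns the existing dump unchanged if one was already created for |guid|;
// otherwise creates it with the WEAK flag set.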
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get())
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

void ProcessMemoryDump::DumpHeapUsage(
    const std::unordered_map<base::trace_event::AllocationContext,
                             base::trace_event::AllocationMetrics>&
        metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
                                             allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
    std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
  DCHECK(allocator_dumps_.empty());
  for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
    AddAllocatorDumpInternal(std::move(dump));
}

std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
ProcessMemoryDump::GetAllEdgesForSerialization() const {
  std::vector<MemoryAllocatorDumpEdge> edges;
  edges.reserve(allocator_dumps_edges_.size());
  for (const auto& it : allocator_dumps_edges_)
    edges.push_back(it.second);
  return edges;
}

void ProcessMemoryDump::SetAllEdgesForSerialization(
    const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
  DCHECK(allocator_dumps_edges_.empty());
  for (const MemoryAllocatorDumpEdge& edge : edges) {
    auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
    DCHECK(it_and_inserted.second);
  }
}

void ProcessMemoryDump::Clear() {
  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();
}

void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
  if (allocator_dumps_.size() > 0) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  value->BeginArray("allocators_graph");
  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", kEdgeTypeOwnership);
    value->EndDictionary();
  }
  value->EndArray();
}

void ProcessMemoryDump::SerializeAllocatorDumpsInto(
    perfetto::protos::pbzero::MemoryTrackerSnapshot* memory_snapshot,
    const base::ProcessId pid) const {
  ProcessSnapshot* process_snapshot =
      memory_snapshot->add_process_memory_dumps();
  process_snapshot->set_pid(static_cast<int>(pid));

  for (const auto& allocator_dump_it : allocator_dumps_) {
    ProcessSnapshot::MemoryNode* memory_node =
        process_snapshot->add_allocator_dumps();
    allocator_dump_it.second->AsProtoInto(memory_node);
  }

  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    ProcessSnapshot::MemoryEdge* memory_edge =
        process_snapshot->add_memory_edges();

    memory_edge->set_source_id(edge.source.ToUint64());
    memory_edge->set_target_id(edge.target.ToUint64());
    // TODO(crbug.com/1333557): Fix .proto and remove this cast.
    memory_edge->set_importance(static_cast<uint32_t>(edge.importance));
  }
}

void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  // This will either override an existing edge or create a new one.
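  // When an edge for |source| already exists, the highest importance seen so
  // far wins and the target must not change.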
  auto it = allocator_dumps_edges_.find(source);
  int max_importance = importance;
  if (it != allocator_dumps_edges_.end()) {
    DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
    max_importance = std::max(importance, it->second.importance);
  }
  allocator_dumps_edges_[source] = {source, target, max_importance,
                                    false /* overridable */};
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

void ProcessMemoryDump::AddOverridableOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target,
    int importance) {
  if (allocator_dumps_edges_.count(source) == 0) {
    allocator_dumps_edges_[source] = {source, target, importance,
                                      true /* overridable */};
  } else {
    // An edge between the source and target already exists, so do nothing
    // here: the new overridable edge is implicitly overridden by the strong
    // edge that was created earlier.
    DCHECK(!allocator_dumps_edges_[source].overridable);
  }
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
                                          shared_memory_guid, importance,
                                          false /*is_weak*/);
}

void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(
      client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance,
    bool is_weak) {
  DCHECK(!shared_memory_guid.is_empty());
  // New model where the global dumps created by SharedMemoryTracker are used
  // for the clients.

  // The guid of the local dump created by SharedMemoryTracker for the memory
  // segment.
  auto local_shm_guid =
      GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));

  // The dump guid of the global dump created by the tracker for the memory
  // segment.
  auto global_shm_guid =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);

  // Create an edge between the local dump of the client and the local dump of
  // the SharedMemoryTracker. There is no need to create the dumps here since
  // the tracker will create them. The importance is also required here for
  // the case of single-process mode.
  AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);

  // TODO(ssid): Handle the case of weak dumps here. This needs a new function
  // GetOrCreateGlobalDump() in PMD since we need to change the behavior of the
  // created global dump.
  // Create an edge that overrides the edge created by SharedMemoryTracker.
  AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
}

void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return;

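  // The child dump is named after the target node plus the source guid, so
  // each suballocation gets its own node under the target.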
  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

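// All dumps rejected in background mode share a single lazily-created
// "discarded" dump, so callers always receive a valid object to write into.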
MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad(
    const std::string& absolute_name) {
  DCHECK(is_black_hole_non_fatal_for_testing_)
      << " unknown dump name " << absolute_name
      << " this likely means kAllocatorDumpNameAllowlist needs to be updated";
  if (!black_hole_mad_) {
    std::string name = "discarded";
    black_hole_mad_ = std::make_unique<MemoryAllocatorDump>(
        name, dump_args_.level_of_detail, GetDumpId(name));
  }
  return black_hole_mad_.get();
}

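// Scopes the dump guid to this process by combining the per-process token
// with the dump's absolute name, so equal names in different processes do not
// collide.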
MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
    const std::string& absolute_name) {
  return MemoryAllocatorDumpGuid(StringPrintf(
      "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
    const MemoryAllocatorDumpEdge& other) const {
  return source == other.source && target == other.target &&
         importance == other.importance && overridable == other.overridable;
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator!=(
    const MemoryAllocatorDumpEdge& other) const {
  return !(*this == other);
}

}  // namespace trace_event
}  // namespace base