• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2015 The Chromium Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifdef UNSAFE_BUFFERS_BUILD
6 // TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
7 #pragma allow_unsafe_buffers
8 #endif
9 
10 #include "base/trace_event/process_memory_dump.h"
11 
12 #include <errno.h>
13 
14 #include <memory>
15 #include <optional>
16 #include <vector>
17 
18 #include "base/bits.h"
19 #include "base/containers/heap_array.h"
20 #include "base/logging.h"
21 #include "base/memory/page_size.h"
22 #include "base/memory/ptr_util.h"
23 #include "base/memory/shared_memory_tracker.h"
24 #include "base/notimplemented.h"
25 #include "base/process/process_metrics.h"
26 #include "base/strings/string_util.h"
27 #include "base/strings/stringprintf.h"
28 #include "base/trace_event/memory_infra_background_allowlist.h"
29 #include "base/trace_event/trace_event_impl.h"
30 #include "base/trace_event/traced_value.h"
31 #include "base/unguessable_token.h"
32 #include "build/build_config.h"
33 #include "third_party/perfetto/protos/perfetto/trace/memory_graph.pbzero.h"
34 #include "third_party/perfetto/protos/perfetto/trace/trace_packet.pbzero.h"
35 
36 #if BUILDFLAG(IS_IOS)
37 #include <mach/vm_page_size.h>
38 #endif
39 
40 #if BUILDFLAG(IS_POSIX)
41 #include <sys/mman.h>
42 #endif
43 
44 #if BUILDFLAG(IS_WIN)
45 #include <windows.h>  // Must be in front of other Windows header files
46 
47 #include <Psapi.h>
48 #endif
49 
50 #if BUILDFLAG(IS_FUCHSIA)
51 #include <tuple>
52 
53 #include "base/notreached.h"
54 #endif
55 
56 using ProcessSnapshot =
57     ::perfetto::protos::pbzero::MemoryTrackerSnapshot_ProcessSnapshot;
58 
59 namespace base {
60 namespace trace_event {
61 
62 namespace {
63 
64 const char kEdgeTypeOwnership[] = "ownership";
65 
GetSharedGlobalAllocatorDumpName(const MemoryAllocatorDumpGuid & guid)66 std::string GetSharedGlobalAllocatorDumpName(
67     const MemoryAllocatorDumpGuid& guid) {
68   return "global/" + guid.ToString();
69 }
70 
71 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
GetSystemPageCount(size_t mapped_size,size_t page_size)72 size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
73   return (mapped_size + page_size - 1) / page_size;
74 }
75 #endif
76 
GetTokenForCurrentProcess()77 UnguessableToken GetTokenForCurrentProcess() {
78   static UnguessableToken instance = UnguessableToken::Create();
79   return instance;
80 }
81 
82 }  // namespace
83 
84 // static
85 bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
86 
87 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
88 // static
GetSystemPageSize()89 size_t ProcessMemoryDump::GetSystemPageSize() {
90 #if BUILDFLAG(IS_IOS)
91   // On iOS, getpagesize() returns the user page sizes, but for allocating
92   // arrays for mincore(), kernel page sizes is needed. Use vm_kernel_page_size
93   // as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
94   // Refer to http://crbug.com/542671 and Apple rdar://23651782
95   return vm_kernel_page_size;
96 #else
97   return base::GetPageSize();
98 #endif  // BUILDFLAG(IS_IOS)
99 }
100 
101 // static
CountResidentBytes(void * start_address,size_t mapped_size)102 std::optional<size_t> ProcessMemoryDump::CountResidentBytes(
103     void* start_address,
104     size_t mapped_size) {
105   const size_t page_size = GetSystemPageSize();
106   const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
107   DCHECK_EQ(0u, start_pointer % page_size);
108 
109   size_t offset = 0;
110   size_t total_resident_pages = 0;
111   bool failure = false;
112 
113   // An array as large as number of pages in memory segment needs to be passed
114   // to the query function. To avoid allocating a large array, the given block
115   // of memory is split into chunks of size |kMaxChunkSize|.
116   const size_t kMaxChunkSize = 8 * 1024 * 1024;
117   size_t max_vec_size =
118       GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
119 
120 #if BUILDFLAG(IS_WIN)
121   auto vec =
122       base::HeapArray<PSAPI_WORKING_SET_EX_INFORMATION>::WithSize(max_vec_size);
123 #elif BUILDFLAG(IS_APPLE)
124   auto vec = base::HeapArray<char>::WithSize(max_vec_size);
125 #elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
126   auto vec = base::HeapArray<unsigned char>::WithSize(max_vec_size);
127 #endif
128 
129   while (offset < mapped_size) {
130     uintptr_t chunk_start = (start_pointer + offset);
131     const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
132     const size_t page_count = GetSystemPageCount(chunk_size, page_size);
133     size_t resident_page_count = 0;
134 #if BUILDFLAG(IS_WIN)
135     for (size_t i = 0; i < page_count; i++) {
136       vec[i].VirtualAddress =
137           reinterpret_cast<void*>(chunk_start + i * page_size);
138     }
139 
140     auto span = vec.first(page_count);
141     failure = !QueryWorkingSetEx(GetCurrentProcess(), span.data(),
142                                  static_cast<DWORD>(span.size_bytes()));
143 
144     for (size_t i = 0; i < page_count; i++)
145       resident_page_count += vec[i].VirtualAttributes.Valid;
146 #elif BUILDFLAG(IS_FUCHSIA)
147     // TODO(crbug.com/42050620): Implement counting resident bytes.
148     // For now, log and avoid unused variable warnings.
149     NOTIMPLEMENTED_LOG_ONCE();
150     std::ignore = chunk_start;
151     std::ignore = page_count;
152 #elif BUILDFLAG(IS_APPLE)
153     // mincore in MAC does not fail with EAGAIN.
154     failure =
155         !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.data());
156     for (size_t i = 0; i < page_count; i++)
157       resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
158 #elif BUILDFLAG(IS_POSIX)
159     int error_counter = 0;
160     int result = 0;
161     // HANDLE_EINTR tries for 100 times. So following the same pattern.
162     do {
163       result =
164 #if BUILDFLAG(IS_AIX)
165           mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
166                   reinterpret_cast<char*>(vec.data()));
167 #else
168           mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.data());
169 #endif
170     } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
171     failure = !!result;
172 
173     for (size_t i = 0; i < page_count; i++)
174       resident_page_count += vec[i] & 1;
175 #endif
176 
177     if (failure)
178       break;
179 
180     total_resident_pages += resident_page_count * page_size;
181     offset += kMaxChunkSize;
182   }
183 
184   if (failure) {
185     PLOG(ERROR) << "CountResidentBytes";
186     return std::nullopt;
187   }
188   return total_resident_pages;
189 }
190 
191 // static
CountResidentBytesInSharedMemory(void * start_address,size_t mapped_size)192 std::optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
193     void* start_address,
194     size_t mapped_size) {
195   // `MapAt()` performs some internal arithmetic to allow non-page-aligned
196   // offsets, but the memory accounting still expects to work with page-aligned
197   // allocations.
198   //
199   // TODO(dcheng): one peculiarity here is that the shmem implementation uses
200   // `base::SysInfo::VMAllocationGranularity()` while this file uses
201   // `GetSystemPageSize()`. It'd be nice not to have two names for the same
202   // thing...
203   uint8_t* aligned_start_address = base::bits::AlignDown(
204       static_cast<uint8_t*>(start_address), GetSystemPageSize());
205   size_t adjusted_size =
206       mapped_size + static_cast<size_t>(static_cast<uint8_t*>(start_address) -
207                                         aligned_start_address);
208 
209 #if BUILDFLAG(IS_APPLE)
210   // On macOS and iOS, use mach_vm_region|vm_region_64 instead of mincore for
211   // performance (crbug.com/742042).
212   mach_vm_size_t dummy_size = 0;
213   mach_vm_address_t address =
214       reinterpret_cast<mach_vm_address_t>(aligned_start_address);
215   vm_region_top_info_data_t info;
216   MachVMRegionResult result =
217       GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
218   if (result == MachVMRegionResult::Error) {
219     LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
220                   "is invalid";
221     return std::optional<size_t>();
222   }
223 
224   size_t resident_pages =
225       info.private_pages_resident + info.shared_pages_resident;
226 
227   // On macOS and iOS, measurements for private memory footprint overcount by
228   // faulted pages in anonymous shared memory. To discount for this, we touch
229   // all the resident pages in anonymous shared memory here, thus making them
230   // faulted as well. This relies on two assumptions:
231   //
232   // 1) Consumers use shared memory from front to back. Thus, if there are
233   // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes in
234   // the shared memory region.
235   //
236   // 2) This logic is run shortly before the logic that calculates
237   // phys_footprint, thus ensuring that the discrepancy between faulted and
238   // resident pages is minimal.
239   //
240   // The performance penalty is expected to be small.
241   //
242   // * Most of the time, we expect the pages to already be resident and faulted,
243   // thus incurring a cache penalty read hit [since we read from each resident
244   // page].
245   //
246   // * Rarely, we expect the pages to be resident but not faulted, resulting in
247   // soft faults + cache penalty.
248   //
249   // * If assumption (1) is invalid, this will potentially fault some
250   // previously non-resident pages, thus increasing memory usage, without fixing
251   // the accounting.
252   //
253   // Sanity check in case the mapped size is less than the total size of the
254   // region.
255   size_t pages_to_fault =
256       std::min(resident_pages, (adjusted_size + PAGE_SIZE - 1) / PAGE_SIZE);
257 
258   volatile uint8_t* base_address = const_cast<uint8_t*>(aligned_start_address);
259   for (size_t i = 0; i < pages_to_fault; ++i) {
260     // Reading from a volatile is a visible side-effect for the purposes of
261     // optimization. This guarantees that the optimizer will not kill this line.
262     base_address[i * PAGE_SIZE];
263   }
264 
265   return resident_pages * PAGE_SIZE;
266 #else
267   return CountResidentBytes(aligned_start_address, adjusted_size);
268 #endif  // BUILDFLAG(IS_MAC)
269 }
270 
271 #endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
272 
ProcessMemoryDump(const MemoryDumpArgs & dump_args)273 ProcessMemoryDump::ProcessMemoryDump(
274     const MemoryDumpArgs& dump_args)
275     : process_token_(GetTokenForCurrentProcess()),
276       dump_args_(dump_args) {}
277 
278 ProcessMemoryDump::~ProcessMemoryDump() = default;
279 ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
280 ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
281     default;
282 
CreateAllocatorDump(const std::string & absolute_name)283 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
284     const std::string& absolute_name) {
285   return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
286       absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
287 }
288 
CreateAllocatorDump(const std::string & absolute_name,const MemoryAllocatorDumpGuid & guid)289 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
290     const std::string& absolute_name,
291     const MemoryAllocatorDumpGuid& guid) {
292   return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
293       absolute_name, dump_args_.level_of_detail, guid));
294 }
295 
AddAllocatorDumpInternal(std::unique_ptr<MemoryAllocatorDump> mad)296 MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
297     std::unique_ptr<MemoryAllocatorDump> mad) {
298   // In background mode return the black hole dump, if invalid dump name is
299   // given.
300   if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground &&
301       !IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
302     return GetBlackHoleMad(mad->absolute_name());
303   }
304 
305   auto insertion_result = allocator_dumps_.insert(
306       std::make_pair(mad->absolute_name(), std::move(mad)));
307   MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
308   DCHECK(insertion_result.second) << "Duplicate name: "
309                                   << inserted_mad->absolute_name();
310   return inserted_mad;
311 }
312 
GetAllocatorDump(const std::string & absolute_name) const313 MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
314     const std::string& absolute_name) const {
315   auto it = allocator_dumps_.find(absolute_name);
316   if (it != allocator_dumps_.end())
317     return it->second.get();
318   return nullptr;
319 }
320 
GetOrCreateAllocatorDump(const std::string & absolute_name)321 MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
322     const std::string& absolute_name) {
323   MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
324   return mad ? mad : CreateAllocatorDump(absolute_name);
325 }
326 
CreateSharedGlobalAllocatorDump(const MemoryAllocatorDumpGuid & guid)327 MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
328     const MemoryAllocatorDumpGuid& guid) {
329   // A shared allocator dump can be shared within a process and the guid could
330   // have been created already.
331   MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
332   if (mad && mad != black_hole_mad_.get()) {
333     // The kWeak flag is cleared because this method should create a non-weak
334     // dump.
335     mad->clear_flags(MemoryAllocatorDump::Flags::kWeak);
336     return mad;
337   }
338   return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
339 }
340 
CreateWeakSharedGlobalAllocatorDump(const MemoryAllocatorDumpGuid & guid)341 MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
342     const MemoryAllocatorDumpGuid& guid) {
343   MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
344   if (mad && mad != black_hole_mad_.get())
345     return mad;
346   mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
347   mad->set_flags(MemoryAllocatorDump::Flags::kWeak);
348   return mad;
349 }
350 
GetSharedGlobalAllocatorDump(const MemoryAllocatorDumpGuid & guid) const351 MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
352     const MemoryAllocatorDumpGuid& guid) const {
353   return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
354 }
355 
DumpHeapUsage(const std::unordered_map<base::trace_event::AllocationContext,base::trace_event::AllocationMetrics> & metrics_by_context,base::trace_event::TraceEventMemoryOverhead & overhead,const char * allocator_name)356 void ProcessMemoryDump::DumpHeapUsage(
357     const std::unordered_map<base::trace_event::AllocationContext,
358                              base::trace_event::AllocationMetrics>&
359         metrics_by_context,
360     base::trace_event::TraceEventMemoryOverhead& overhead,
361     const char* allocator_name) {
362   std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
363                                              allocator_name);
364   overhead.DumpInto(base_name.c_str(), this);
365 }
366 
SetAllocatorDumpsForSerialization(std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps)367 void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
368     std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
369   DCHECK(allocator_dumps_.empty());
370   for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
371     AddAllocatorDumpInternal(std::move(dump));
372 }
373 
374 std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
GetAllEdgesForSerialization() const375 ProcessMemoryDump::GetAllEdgesForSerialization() const {
376   std::vector<MemoryAllocatorDumpEdge> edges;
377   edges.reserve(allocator_dumps_edges_.size());
378   for (const auto& it : allocator_dumps_edges_)
379     edges.push_back(it.second);
380   return edges;
381 }
382 
SetAllEdgesForSerialization(const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge> & edges)383 void ProcessMemoryDump::SetAllEdgesForSerialization(
384     const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
385   DCHECK(allocator_dumps_edges_.empty());
386   for (const MemoryAllocatorDumpEdge& edge : edges) {
387     auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
388     DCHECK(it_and_inserted.second);
389   }
390 }
391 
Clear()392 void ProcessMemoryDump::Clear() {
393   allocator_dumps_.clear();
394   allocator_dumps_edges_.clear();
395 }
396 
TakeAllDumpsFrom(ProcessMemoryDump * other)397 void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
398   // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
399   // into this ProcessMemoryDump, checking for duplicates.
400   for (auto& it : other->allocator_dumps_)
401     AddAllocatorDumpInternal(std::move(it.second));
402   other->allocator_dumps_.clear();
403 
404   // Move all the edges.
405   allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
406                                 other->allocator_dumps_edges_.end());
407   other->allocator_dumps_edges_.clear();
408 }
409 
SerializeAllocatorDumpsInto(TracedValue * value) const410 void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
411   if (allocator_dumps_.size() > 0) {
412     value->BeginDictionary("allocators");
413     for (const auto& allocator_dump_it : allocator_dumps_)
414       allocator_dump_it.second->AsValueInto(value);
415     value->EndDictionary();
416   }
417 
418   value->BeginArray("allocators_graph");
419   for (const auto& it : allocator_dumps_edges_) {
420     const MemoryAllocatorDumpEdge& edge = it.second;
421     value->BeginDictionary();
422     value->SetString("source", edge.source.ToString());
423     value->SetString("target", edge.target.ToString());
424     value->SetInteger("importance", edge.importance);
425     value->SetString("type", kEdgeTypeOwnership);
426     value->EndDictionary();
427   }
428   value->EndArray();
429 }
430 
SerializeAllocatorDumpsInto(perfetto::protos::pbzero::MemoryTrackerSnapshot * memory_snapshot,const base::ProcessId pid) const431 void ProcessMemoryDump::SerializeAllocatorDumpsInto(
432     perfetto::protos::pbzero::MemoryTrackerSnapshot* memory_snapshot,
433     const base::ProcessId pid) const {
434   ProcessSnapshot* process_snapshot =
435       memory_snapshot->add_process_memory_dumps();
436   process_snapshot->set_pid(static_cast<int>(pid));
437 
438   for (const auto& allocator_dump_it : allocator_dumps_) {
439     ProcessSnapshot::MemoryNode* memory_node =
440         process_snapshot->add_allocator_dumps();
441     allocator_dump_it.second->AsProtoInto(memory_node);
442   }
443 
444   for (const auto& it : allocator_dumps_edges_) {
445     const MemoryAllocatorDumpEdge& edge = it.second;
446     ProcessSnapshot::MemoryEdge* memory_edge =
447         process_snapshot->add_memory_edges();
448 
449     memory_edge->set_source_id(edge.source.ToUint64());
450     memory_edge->set_target_id(edge.target.ToUint64());
451     // TODO(crbug.com/40845742): Fix .proto and remove this cast.
452     memory_edge->set_importance(static_cast<uint32_t>(edge.importance));
453   }
454 }
455 
AddOwnershipEdge(const MemoryAllocatorDumpGuid & source,const MemoryAllocatorDumpGuid & target,int importance)456 void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
457                                          const MemoryAllocatorDumpGuid& target,
458                                          int importance) {
459   // This will either override an existing edge or create a new one.
460   auto it = allocator_dumps_edges_.find(source);
461   int max_importance = importance;
462   if (it != allocator_dumps_edges_.end()) {
463     DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
464     max_importance = std::max(importance, it->second.importance);
465   }
466   allocator_dumps_edges_[source] = {source, target, max_importance,
467                                     false /* overridable */};
468 }
469 
AddOwnershipEdge(const MemoryAllocatorDumpGuid & source,const MemoryAllocatorDumpGuid & target)470 void ProcessMemoryDump::AddOwnershipEdge(
471     const MemoryAllocatorDumpGuid& source,
472     const MemoryAllocatorDumpGuid& target) {
473   AddOwnershipEdge(source, target, 0 /* importance */);
474 }
475 
AddOverridableOwnershipEdge(const MemoryAllocatorDumpGuid & source,const MemoryAllocatorDumpGuid & target,int importance)476 void ProcessMemoryDump::AddOverridableOwnershipEdge(
477     const MemoryAllocatorDumpGuid& source,
478     const MemoryAllocatorDumpGuid& target,
479     int importance) {
480   if (allocator_dumps_edges_.count(source) == 0) {
481     allocator_dumps_edges_[source] = {source, target, importance,
482                                       true /* overridable */};
483   } else {
484     // An edge between the source and target already exits. So, do nothing here
485     // since the new overridable edge is implicitly overridden by a strong edge
486     // which was created earlier.
487     DCHECK(!allocator_dumps_edges_[source].overridable);
488   }
489 }
490 
CreateSharedMemoryOwnershipEdge(const MemoryAllocatorDumpGuid & client_local_dump_guid,const UnguessableToken & shared_memory_guid,int importance)491 void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
492     const MemoryAllocatorDumpGuid& client_local_dump_guid,
493     const UnguessableToken& shared_memory_guid,
494     int importance) {
495   CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
496                                           shared_memory_guid, importance,
497                                           false /*is_weak*/);
498 }
499 
CreateWeakSharedMemoryOwnershipEdge(const MemoryAllocatorDumpGuid & client_local_dump_guid,const UnguessableToken & shared_memory_guid,int importance)500 void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
501     const MemoryAllocatorDumpGuid& client_local_dump_guid,
502     const UnguessableToken& shared_memory_guid,
503     int importance) {
504   CreateSharedMemoryOwnershipEdgeInternal(
505       client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
506 }
507 
CreateSharedMemoryOwnershipEdgeInternal(const MemoryAllocatorDumpGuid & client_local_dump_guid,const UnguessableToken & shared_memory_guid,int importance,bool is_weak)508 void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
509     const MemoryAllocatorDumpGuid& client_local_dump_guid,
510     const UnguessableToken& shared_memory_guid,
511     int importance,
512     bool is_weak) {
513   DCHECK(!shared_memory_guid.is_empty());
514   // New model where the global dumps created by SharedMemoryTracker are used
515   // for the clients.
516 
517   // The guid of the local dump created by SharedMemoryTracker for the memory
518   // segment.
519   auto local_shm_guid =
520       GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));
521 
522   // The dump guid of the global dump created by the tracker for the memory
523   // segment.
524   auto global_shm_guid =
525       SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);
526 
527   // Create an edge between local dump of the client and the local dump of the
528   // SharedMemoryTracker. Do not need to create the dumps here since the tracker
529   // would create them. The importance is also required here for the case of
530   // single process mode.
531   AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);
532 
533   // TODO(ssid): Handle the case of weak dumps here. This needs a new function
534   // GetOrCreaetGlobalDump() in PMD since we need to change the behavior of the
535   // created global dump.
536   // Create an edge that overrides the edge created by SharedMemoryTracker.
537   AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
538 }
539 
AddSuballocation(const MemoryAllocatorDumpGuid & source,const std::string & target_node_name)540 void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
541                                          const std::string& target_node_name) {
542   // Do not create new dumps for suballocations in background mode.
543   if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::kBackground) {
544     return;
545   }
546 
547   std::string child_mad_name = target_node_name + "/__" + source.ToString();
548   MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
549   AddOwnershipEdge(source, target_child_mad->guid());
550 }
551 
GetBlackHoleMad(const std::string & absolute_name)552 MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad(
553     const std::string& absolute_name) {
554   DCHECK(is_black_hole_non_fatal_for_testing_)
555       << " unknown dump name " << absolute_name
556       << " this likely means kAllocatorDumpNameAllowlist needs to be updated";
557   if (!black_hole_mad_) {
558     std::string name = "discarded";
559     black_hole_mad_ = std::make_unique<MemoryAllocatorDump>(
560         name, dump_args_.level_of_detail, GetDumpId(name));
561   }
562   return black_hole_mad_.get();
563 }
564 
GetDumpId(const std::string & absolute_name)565 MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
566     const std::string& absolute_name) {
567   return MemoryAllocatorDumpGuid(StringPrintf(
568       "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
569 }
570 
571 }  // namespace trace_event
572 }  // namespace base
573