// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <errno.h>

#include <vector>

#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_infra_background_whitelist.h"
#include "base/trace_event/trace_event_argument.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"

#if defined(OS_IOS)
#include <mach/vm_page_size.h>
#endif

#if defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <sys/mman.h>
#endif

#if defined(OS_WIN)
#include <windows.h>  // Must be in front of other Windows header files

#include <Psapi.h>
#endif

namespace base {
namespace trace_event {

namespace {

const char kEdgeTypeOwnership[] = "ownership";

std::string GetSharedGlobalAllocatorDumpName(
    const MemoryAllocatorDumpGuid& guid) {
  return "global/" + guid.ToString();
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
  return (mapped_size + page_size - 1) / page_size;
}
#endif

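// Returns a token created once and cached for the lifetime of the process.
// GetDumpId() below mixes this token into every dump GUID, so identically
// named dumps emitted by different processes end up with distinct GUIDs.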
UnguessableToken GetTokenForCurrentProcess() {
  static UnguessableToken instance = UnguessableToken::Create();
  return instance;
}

}  // namespace

// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
  // On iOS, getpagesize() returns the user page size, but arrays passed to
  // mincore() must be sized using the kernel page size. Use
  // vm_kernel_page_size as recommended by Apple,
  // https://forums.developer.apple.com/thread/47532/.
  // Refer to http://crbug.com/542671 and Apple rdar://23651782.
  return vm_kernel_page_size;
#else
  return base::GetPageSize();
#endif  // defined(OS_IOS)
}

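// Counts the resident memory (in bytes) of the mapping starting at
// |start_address|, which must be page-aligned. The mapping is scanned in
// |kMaxChunkSize| chunks and per-page residency is queried with
// QueryWorkingSetEx() on Windows and mincore() on POSIX/macOS; 0 is returned
// if a query fails.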
// static
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                             size_t mapped_size) {
  const size_t page_size = GetSystemPageSize();
  const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
  DCHECK_EQ(0u, start_pointer % page_size);

  size_t offset = 0;
  size_t total_resident_pages = 0;
  bool failure = false;

  // An array as large as the number of pages in the memory segment needs to
  // be passed to the query function. To avoid allocating a large array, the
  // given block of memory is split into chunks of size |kMaxChunkSize|.
  const size_t kMaxChunkSize = 8 * 1024 * 1024;
  size_t max_vec_size =
      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
#if defined(OS_WIN)
  std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif defined(OS_MACOSX)
  std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
  std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif

  while (offset < mapped_size) {
    uintptr_t chunk_start = (start_pointer + offset);
    const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
    size_t resident_page_count = 0;
#if defined(OS_WIN)
    for (size_t i = 0; i < page_count; i++) {
      vec[i].VirtualAddress =
          reinterpret_cast<void*>(chunk_start + i * page_size);
    }
    DWORD vec_size = static_cast<DWORD>(
        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i].VirtualAttributes.Valid;
#elif defined(OS_FUCHSIA)
    // TODO(fuchsia): Port, see https://crbug.com/706592.
    ALLOW_UNUSED_LOCAL(chunk_start);
    ALLOW_UNUSED_LOCAL(page_count);
#elif defined(OS_MACOSX)
    // mincore() on macOS does not fail with EAGAIN.
    failure =
        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif defined(OS_POSIX)
    int error_counter = 0;
    int result = 0;
    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
    do {
      result =
#if defined(OS_AIX)
          mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
                  reinterpret_cast<char*>(vec.get()));
#else
          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
#endif
    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
    failure = !!result;

    for (size_t i = 0; i < page_count; i++)
      resident_page_count += vec[i] & 1;
#endif

    if (failure)
      break;

    total_resident_pages += resident_page_count * page_size;
    offset += kMaxChunkSize;
  }

  DCHECK(!failure);
  if (failure) {
    total_resident_pages = 0;
    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
  }
  return total_resident_pages;
}

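// Shared-memory variant of CountResidentBytes(). On macOS this queries the
// region via the Mach VM API (see below) and returns an empty Optional on
// failure; on other platforms it simply defers to CountResidentBytes().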
// static
base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
    void* start_address,
    size_t mapped_size) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
  // On macOS, use mach_vm_region instead of mincore for performance
  // (crbug.com/742042).
  mach_vm_size_t dummy_size = 0;
  mach_vm_address_t address =
      reinterpret_cast<mach_vm_address_t>(start_address);
  vm_region_top_info_data_t info;
  MachVMRegionResult result =
      GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
  if (result == MachVMRegionResult::Error) {
    LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
                  "is invalid";
    return base::Optional<size_t>();
  }

  size_t resident_pages =
      info.private_pages_resident + info.shared_pages_resident;

  // On macOS, measurements of the private memory footprint overcount by the
  // number of faulted pages in anonymous shared memory. To compensate, we
  // touch all the resident pages in anonymous shared memory here, thus making
  // them faulted as well. This relies on two assumptions:
  //
  // 1) Consumers use shared memory from front to back. Thus, if there are
  // (N) resident pages, those pages represent the first N * PAGE_SIZE bytes
  // in the shared memory region.
  //
  // 2) This logic is run shortly before the logic that calculates
  // phys_footprint, thus ensuring that the discrepancy between faulted and
  // resident pages is minimal.
  //
  // The performance penalty is expected to be small.
  //
  // * Most of the time, we expect the pages to already be resident and
  // faulted, so the only cost is reading from each resident page.
  //
  // * Rarely, we expect the pages to be resident but not faulted, resulting
  // in soft faults plus the read cost.
  //
  // * If assumption (1) is invalid, this will potentially fault some
  // previously non-resident pages, thus increasing memory usage, without
  // fixing the accounting.
  //
  // Sanity check in case the mapped size is less than the total size of the
  // region.
  size_t pages_to_fault =
      std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);

  volatile char* base_address = static_cast<char*>(start_address);
  for (size_t i = 0; i < pages_to_fault; ++i) {
    // Reading from a volatile is a visible side-effect for the purposes of
    // optimization. This guarantees that the optimizer will not kill this
    // line.
    base_address[i * PAGE_SIZE];
  }

  return resident_pages * PAGE_SIZE;
#else
  return CountResidentBytes(start_address, mapped_size);
#endif  // defined(OS_MACOSX) && !defined(OS_IOS)
}

#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

ProcessMemoryDump::ProcessMemoryDump(
    const MemoryDumpArgs& dump_args)
    : process_token_(GetTokenForCurrentProcess()),
      dump_args_(dump_args) {}

ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
    default;

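// Typical usage (illustrative; the dump name and values are made up):
//
//   MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("myallocator/heap");
//   dump->AddScalar(MemoryAllocatorDump::kNameSize,
//                   MemoryAllocatorDump::kUnitsBytes, 4096);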
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}

MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
    const std::string& absolute_name,
    const MemoryAllocatorDumpGuid& guid) {
  return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
      absolute_name, dump_args_.level_of_detail, guid));
}

MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
    std::unique_ptr<MemoryAllocatorDump> mad) {
  // In background mode, return the black-hole dump if a non-whitelisted dump
  // name is given.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
      !IsMemoryAllocatorDumpNameWhitelisted(mad->absolute_name())) {
    return GetBlackHoleMad();
  }

  auto insertion_result = allocator_dumps_.insert(
      std::make_pair(mad->absolute_name(), std::move(mad)));
  MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
  DCHECK(insertion_result.second) << "Duplicate name: "
                                  << inserted_mad->absolute_name();
  return inserted_mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
    const std::string& absolute_name) const {
  auto it = allocator_dumps_.find(absolute_name);
  if (it != allocator_dumps_.end())
    return it->second.get();
  return nullptr;
}

MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
    const std::string& absolute_name) {
  MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
  return mad ? mad : CreateAllocatorDump(absolute_name);
}

MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  // A shared allocator dump can be shared within a process and the guid could
  // have been created already.
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get()) {
    // The weak flag is cleared because this method should create a non-weak
    // dump.
    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
    return mad;
  }
  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}

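// As above, but the returned dump is marked WEAK. A weak dump acts as a
// placeholder that downstream dump processing may discard unless a non-weak
// dump with the same GUID is emitted elsewhere (see the WEAK flag handling in
// CreateSharedGlobalAllocatorDump() above).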
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) {
  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
  if (mad && mad != black_hole_mad_.get())
    return mad;
  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
  return mad;
}

MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
    const MemoryAllocatorDumpGuid& guid) const {
  return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}

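// Note: only the heap profiler's own memory overhead is dumped here;
// |metrics_by_context| is not serialized by this method.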
void ProcessMemoryDump::DumpHeapUsage(
    const std::unordered_map<base::trace_event::AllocationContext,
                             base::trace_event::AllocationMetrics>&
        metrics_by_context,
    base::trace_event::TraceEventMemoryOverhead& overhead,
    const char* allocator_name) {
  std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
                                             allocator_name);
  overhead.DumpInto(base_name.c_str(), this);
}

void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
    std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
  DCHECK(allocator_dumps_.empty());
  for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
    AddAllocatorDumpInternal(std::move(dump));
}

std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
ProcessMemoryDump::GetAllEdgesForSerialization() const {
  std::vector<MemoryAllocatorDumpEdge> edges;
  edges.reserve(allocator_dumps_edges_.size());
  for (const auto& it : allocator_dumps_edges_)
    edges.push_back(it.second);
  return edges;
}

void ProcessMemoryDump::SetAllEdgesForSerialization(
    const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
  DCHECK(allocator_dumps_edges_.empty());
  for (const MemoryAllocatorDumpEdge& edge : edges) {
    auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
    DCHECK(it_and_inserted.second);
  }
}

void ProcessMemoryDump::Clear() {
  allocator_dumps_.clear();
  allocator_dumps_edges_.clear();
}

void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
  // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
  // into this ProcessMemoryDump, checking for duplicates.
  for (auto& it : other->allocator_dumps_)
    AddAllocatorDumpInternal(std::move(it.second));
  other->allocator_dumps_.clear();

  // Move all the edges.
  allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
                                other->allocator_dumps_edges_.end());
  other->allocator_dumps_edges_.clear();
}

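// Writes the dumps into |value| as an "allocators" dictionary (one entry per
// dump) followed by an "allocators_graph" array holding the ownership edges.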
void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
  if (allocator_dumps_.size() > 0) {
    value->BeginDictionary("allocators");
    for (const auto& allocator_dump_it : allocator_dumps_)
      allocator_dump_it.second->AsValueInto(value);
    value->EndDictionary();
  }

  value->BeginArray("allocators_graph");
  for (const auto& it : allocator_dumps_edges_) {
    const MemoryAllocatorDumpEdge& edge = it.second;
    value->BeginDictionary();
    value->SetString("source", edge.source.ToString());
    value->SetString("target", edge.target.ToString());
    value->SetInteger("importance", edge.importance);
    value->SetString("type", kEdgeTypeOwnership);
    value->EndDictionary();
  }
  value->EndArray();
}

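// If an edge from |source| already exists it must point at the same |target|
// (enforced by the DCHECK below); the surviving edge keeps the larger of the
// two importance values and is marked non-overridable.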
void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
                                         const MemoryAllocatorDumpGuid& target,
                                         int importance) {
  // This will either override an existing edge or create a new one.
  auto it = allocator_dumps_edges_.find(source);
  int max_importance = importance;
  if (it != allocator_dumps_edges_.end()) {
    DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
    max_importance = std::max(importance, it->second.importance);
  }
  allocator_dumps_edges_[source] = {source, target, max_importance,
                                    false /* overridable */};
}

void ProcessMemoryDump::AddOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target) {
  AddOwnershipEdge(source, target, 0 /* importance */);
}

void ProcessMemoryDump::AddOverridableOwnershipEdge(
    const MemoryAllocatorDumpGuid& source,
    const MemoryAllocatorDumpGuid& target,
    int importance) {
  if (allocator_dumps_edges_.count(source) == 0) {
    allocator_dumps_edges_[source] = {source, target, importance,
                                      true /* overridable */};
  } else {
    // An edge between the source and target already exists. So, do nothing
    // here since the new overridable edge is implicitly overridden by a
    // strong edge which was created earlier.
    DCHECK(!allocator_dumps_edges_[source].overridable);
  }
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
                                          shared_memory_guid, importance,
                                          false /*is_weak*/);
}

void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance) {
  CreateSharedMemoryOwnershipEdgeInternal(
      client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
}

void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
    const MemoryAllocatorDumpGuid& client_local_dump_guid,
    const UnguessableToken& shared_memory_guid,
    int importance,
    bool is_weak) {
  DCHECK(!shared_memory_guid.is_empty());
  // New model where the global dumps created by SharedMemoryTracker are used
  // for the clients.

  // The guid of the local dump created by SharedMemoryTracker for the memory
  // segment.
  auto local_shm_guid =
      GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));

  // The dump guid of the global dump created by the tracker for the memory
  // segment.
  auto global_shm_guid =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);

  // Create an edge between the local dump of the client and the local dump of
  // the SharedMemoryTracker. There is no need to create the dumps here since
  // the tracker creates them. The importance is also required here for the
  // case of single-process mode.
  AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);

  // TODO(ssid): Handle the case of weak dumps here. This needs a new function
  // GetOrCreateGlobalDump() in PMD since we need to change the behavior of the
  // created global dump.
  // Create an edge that overrides the edge created by SharedMemoryTracker.
  AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
}

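// Creates a child dump named "<target_node_name>/__<source guid>" and an
// ownership edge from |source| to it (skipped entirely in background mode).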
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
                                         const std::string& target_node_name) {
  // Do not create new dumps for suballocations in background mode.
  if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
    return;

  std::string child_mad_name = target_node_name + "/__" + source.ToString();
  MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
  AddOwnershipEdge(source, target_child_mad->guid());
}

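// Lazily creates the shared "discarded" dump that absorbs writes to
// non-whitelisted dumps in background mode. Only legal when
// |is_black_hole_non_fatal_for_testing_| is set (see the DCHECK).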
MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
  DCHECK(is_black_hole_non_fatal_for_testing_);
  if (!black_hole_mad_) {
    std::string name = "discarded";
    black_hole_mad_.reset(new MemoryAllocatorDump(
        name, dump_args_.level_of_detail, GetDumpId(name)));
  }
  return black_hole_mad_.get();
}

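// Derives the dump GUID from the per-process token plus the dump's absolute
// name, so equal names in different processes map to different GUIDs.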
MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
    const std::string& absolute_name) {
  return MemoryAllocatorDumpGuid(StringPrintf(
      "%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
    const MemoryAllocatorDumpEdge& other) const {
  return source == other.source && target == other.target &&
         importance == other.importance && overridable == other.overridable;
}

bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator!=(
    const MemoryAllocatorDumpEdge& other) const {
  return !(*this == other);
}

}  // namespace trace_event
}  // namespace base