// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/process_memory_dump.h"

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <algorithm>
#include <memory>
#include <optional>
#include <string>
#include <string_view>
#include <unordered_map>

#include "base/containers/span.h"
#include "base/memory/aligned_memory.h"
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/memory/writable_shared_memory_region.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_log.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#include "testing/gtest/include/gtest/gtest.h"

#if BUILDFLAG(IS_WIN)
#include <windows.h>

#include "winbase.h"
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
#include <sys/mman.h>
#endif

namespace base::trace_event {

namespace {

const MemoryDumpArgs kDetailedDumpArgs = {MemoryDumpLevelOfDetail::kDetailed};
constexpr std::string_view kTestDumpNameAllowlist[] = {
    "Allowlisted/TestName", "Allowlisted/TestName_0x?",
    "Allowlisted/0x?/TestName", "Allowlisted/0x?"};

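// Maps |size| bytes of fresh writable memory using the platform's native
// primitive (VirtualAlloc on Windows, mmap on POSIX and Fuchsia).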
void* Map(size_t size) {
#if BUILDFLAG(IS_WIN)
  return ::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                        PAGE_READWRITE);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  return ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
                0, 0);
#endif
}

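// Releases memory obtained from Map() above.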
void Unmap(void* addr, size_t size) {
#if BUILDFLAG(IS_WIN)
  ::VirtualFree(addr, 0, MEM_DECOMMIT);
#elif BUILDFLAG(IS_POSIX) || BUILDFLAG(IS_FUCHSIA)
  ::munmap(addr, size);
#else
#error This architecture is not (yet) supported.
#endif
}

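// Runs ProcessMemoryDump::CountResidentBytesInSharedMemory() over the full
// mapped size of |mapping| rather than only the requested region size.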
std::optional<size_t> CountResidentBytesInSharedMemory(
    WritableSharedMemoryMapping& mapping) {
  // SAFETY: We need the actual mapped memory size here. There is no public
  // method that returns it as a span, so we have to construct one unsafely.
  // mapped_size() can be larger than `mapping.size()`, but it is the actual
  // size of the memory segment backing the SharedMemoryMapping.
  auto mapped =
      UNSAFE_BUFFERS(base::span(mapping.data(), mapping.mapped_size()));
  return ProcessMemoryDump::CountResidentBytesInSharedMemory(mapped.data(),
                                                             mapped.size());
}

}  // namespace

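// Verifies that moving a ProcessMemoryDump transfers its allocator dumps,
// ownership edges, and dump arguments to the destination.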
TEST(ProcessMemoryDumpTest, MoveConstructor) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2(std::move(pmd1));

  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::kDetailed,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}

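// Verifies that move assignment replaces the destination's existing dumps
// with the source's dumps, edges, and dump arguments.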
TEST(ProcessMemoryDumpTest, MoveAssignment) {
  ProcessMemoryDump pmd1 = ProcessMemoryDump(kDetailedDumpArgs);
  pmd1.CreateAllocatorDump("mad1");
  pmd1.CreateAllocatorDump("mad2");
  pmd1.AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                        MemoryAllocatorDumpGuid(4242));

  ProcessMemoryDump pmd2({MemoryDumpLevelOfDetail::kBackground});
  pmd2.CreateAllocatorDump("malloc");

  pmd2 = std::move(pmd1);
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad1"));
  EXPECT_EQ(1u, pmd2.allocator_dumps().count("mad2"));
  // The dump created on the destination before the move must be gone.
  EXPECT_EQ(0u, pmd2.allocator_dumps().count("malloc"));
  EXPECT_EQ(MemoryDumpLevelOfDetail::kDetailed,
            pmd2.dump_args().level_of_detail);
  EXPECT_EQ(1u, pmd2.allocator_dumps_edges().size());

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd2.SerializeAllocatorDumpsInto(traced_value.get());
}

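// Verifies that Clear() removes all dumps and edges, and that the
// ProcessMemoryDump remains usable afterwards.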
TEST(ProcessMemoryDumpTest, Clear) {
  std::unique_ptr<ProcessMemoryDump> pmd1(
      new ProcessMemoryDump(kDetailedDumpArgs));
  pmd1->CreateAllocatorDump("mad1");
  pmd1->CreateAllocatorDump("mad2");
  ASSERT_FALSE(pmd1->allocator_dumps().empty());

  pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->Clear();
  ASSERT_TRUE(pmd1->allocator_dumps().empty());
  ASSERT_TRUE(pmd1->allocator_dumps_edges().empty());
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));

  // Check that calling serialization routines doesn't cause a crash.
  auto traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  // Check that the pmd can be reused and behaves as expected.
  auto* mad1 = pmd1->CreateAllocatorDump("mad1");
  auto* mad3 = pmd1->CreateAllocatorDump("mad3");
  auto* shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
  ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
  ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
  ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(MemoryAllocatorDump::Flags::kDefault, shared_mad1->flags());
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_EQ(MemoryAllocatorDump::Flags::kWeak, shared_mad2->flags());

  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}

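// Verifies that TakeAllDumpsFrom() moves dumps, edges, and shared global
// dumps into the destination and leaves the source empty but reusable.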
TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
  std::unique_ptr<TracedValue> traced_value(new TracedValue);
  std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
  metrics_by_context[AllocationContext()] = {1, 1};
  TraceEventMemoryOverhead overhead;

  std::unique_ptr<ProcessMemoryDump> pmd1(
      new ProcessMemoryDump(kDetailedDumpArgs));
  auto* mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
  auto* mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
  pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump1");
  pmd1->DumpHeapUsage(metrics_by_context, overhead, "pmd1/heap_dump2");

  std::unique_ptr<ProcessMemoryDump> pmd2(
      new ProcessMemoryDump(kDetailedDumpArgs));
  auto* mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
  auto* mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump1");
  pmd2->DumpHeapUsage(metrics_by_context, overhead, "pmd2/heap_dump2");

  MemoryAllocatorDumpGuid shared_mad_guid1(1);
  MemoryAllocatorDumpGuid shared_mad_guid2(2);
  auto* shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
  auto* shared_mad2 =
      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);

  pmd1->TakeAllDumpsFrom(pmd2.get());

  // Make sure that pmd2 is empty but still usable after it has been emptied.
  ASSERT_TRUE(pmd2->allocator_dumps().empty());
  ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
  pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
  ASSERT_EQ(1u, pmd2->allocator_dumps().size());
  ASSERT_EQ(1u,
            pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
  pmd2->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                         MemoryAllocatorDumpGuid(4242));

  // Check that calling serialization routines doesn't cause a crash.
  pmd2->SerializeAllocatorDumpsInto(traced_value.get());

  // Free |pmd2| to check that ownership of the dumps it created has been
  // transferred to |pmd1|.
  pmd2.reset();

  // Now check that |pmd1| has been effectively merged.
  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
  ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad2"));
  ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
  ASSERT_TRUE(MemoryAllocatorDump::Flags::kWeak & shared_mad2->flags());

  // Check that calling serialization routines doesn't cause a crash.
  traced_value = std::make_unique<TracedValue>();
  pmd1->SerializeAllocatorDumpsInto(traced_value.get());

  pmd1.reset();
}

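// Verifies that edges added with AddOverridableOwnershipEdge() can later be
// replaced by AddOwnershipEdge(), while non-overridable edges are not
// affected by later overridable ones.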
TEST(ProcessMemoryDumpTest, OverrideOwnershipEdge) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));

  auto* shm_dump1 = pmd->CreateAllocatorDump("shared_mem/seg1");
  auto* shm_dump2 = pmd->CreateAllocatorDump("shared_mem/seg2");
  auto* shm_dump3 = pmd->CreateAllocatorDump("shared_mem/seg3");
  auto* shm_dump4 = pmd->CreateAllocatorDump("shared_mem/seg4");

  // Create child dumps pointing at the segments above, mixing overridable
  // and non-overridable ownership edges with different importance values.
  auto* child1_dump = pmd->CreateAllocatorDump("shared_mem/child/seg1");
  pmd->AddOverridableOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                                   0 /* importance */);
  auto* child2_dump = pmd->CreateAllocatorDump("shared_mem/child/seg2");
  pmd->AddOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                        3 /* importance */);
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  pmd->AddOverridableOwnershipEdge(shm_dump3->guid(), shared_mad_guid,
                                   0 /* importance */);
  auto* child4_dump = pmd->CreateAllocatorDump("shared_mem/child/seg4");
  pmd->AddOverridableOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                                   4 /* importance */);

  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();
  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(0, edges.find(child1_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(0, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_TRUE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_TRUE(edges.find(child4_dump->guid())->second.overridable);

  // These should override the old edges:
  pmd->AddOwnershipEdge(child1_dump->guid(), shm_dump1->guid(),
                        1 /* importance */);
  pmd->AddOwnershipEdge(shm_dump3->guid(), shared_mad_guid, 2 /* importance */);
  // These should not override the targets or importance of the old edges:
  pmd->AddOverridableOwnershipEdge(child2_dump->guid(), shm_dump2->guid(),
                                   0 /* importance */);
  pmd->AddOwnershipEdge(child4_dump->guid(), shm_dump4->guid(),
                        0 /* importance */);

  EXPECT_EQ(4u, edges.size());
  EXPECT_EQ(shm_dump1->guid(), edges.find(child1_dump->guid())->second.target);
  EXPECT_EQ(1, edges.find(child1_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child1_dump->guid())->second.overridable);
  EXPECT_EQ(shm_dump2->guid(), edges.find(child2_dump->guid())->second.target);
  EXPECT_EQ(3, edges.find(child2_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child2_dump->guid())->second.overridable);
  EXPECT_EQ(shared_mad_guid, edges.find(shm_dump3->guid())->second.target);
  EXPECT_EQ(2, edges.find(shm_dump3->guid())->second.importance);
  EXPECT_FALSE(edges.find(shm_dump3->guid())->second.overridable);
  EXPECT_EQ(shm_dump4->guid(), edges.find(child4_dump->guid())->second.target);
  EXPECT_EQ(4, edges.find(child4_dump->guid())->second.importance);
  EXPECT_FALSE(edges.find(child4_dump->guid())->second.overridable);
}

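// Verifies that AddSuballocation() creates anonymous child dumps under the
// owning allocator dump and links them with ownership edges.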
TEST(ProcessMemoryDumpTest, Suballocations) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  const std::string allocator_dump_name = "fakealloc/allocated_objects";
  pmd->CreateAllocatorDump(allocator_dump_name);

  // Create one allocation with an auto-assigned guid and mark it as a
  // suballocation of "fakealloc/allocated_objects".
  auto* pic1_dump = pmd->CreateAllocatorDump("picturemanager/picture1");
  pmd->AddSuballocation(pic1_dump->guid(), allocator_dump_name);

  // Same here, but this time create an allocation with an explicit guid.
  auto* pic2_dump = pmd->CreateAllocatorDump("picturemanager/picture2",
                                             MemoryAllocatorDumpGuid(0x42));
  pmd->AddSuballocation(pic2_dump->guid(), allocator_dump_name);

  // Now check that AddSuballocation() has created anonymous child dumps under
  // "fakealloc/allocated_objects".
  auto anon_node_1_it = pmd->allocator_dumps().find(
      allocator_dump_name + "/__" + pic1_dump->guid().ToString());
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_1_it);

  auto anon_node_2_it =
      pmd->allocator_dumps().find(allocator_dump_name + "/__42");
  ASSERT_NE(pmd->allocator_dumps().end(), anon_node_2_it);

  // Finally check that AddSuballocation() has also created the edges between
  // the pictures and the anonymous allocator child dumps.
  bool found_edge[2]{false, false};
  for (const auto& e : pmd->allocator_dumps_edges()) {
    found_edge[0] |= (e.first == pic1_dump->guid() &&
                      e.second.target == anon_node_1_it->second->guid());
    found_edge[1] |= (e.first == pic2_dump->guid() &&
                      e.second.target == anon_node_2_it->second->guid());
  }
  ASSERT_TRUE(found_edge[0]);
  ASSERT_TRUE(found_edge[1]);

  // Check that calling serialization routines doesn't cause a crash.
  std::unique_ptr<TracedValue> traced_value(new TracedValue);
  pmd->SerializeAllocatorDumpsInto(traced_value.get());

  pmd.reset();
}

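// Verifies the weak/strong semantics of shared global allocator dumps: a
// strong creation clears the kWeak flag on an existing weak dump, and later
// weak creations do not restore it.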
TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  MemoryAllocatorDumpGuid shared_mad_guid(1);
  auto* shared_mad1 =
      pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
  ASSERT_EQ(MemoryAllocatorDump::Flags::kWeak, shared_mad1->flags());

  auto* shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad2);
  ASSERT_EQ(MemoryAllocatorDump::Flags::kWeak, shared_mad1->flags());

  auto* shared_mad3 =
      pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad3);
  ASSERT_EQ(MemoryAllocatorDump::Flags::kWeak, shared_mad1->flags());

  auto* shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad4);
  ASSERT_EQ(MemoryAllocatorDump::Flags::kDefault, shared_mad1->flags());

  auto* shared_mad5 =
      pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
  ASSERT_EQ(shared_mad1, shared_mad5);
  ASSERT_EQ(MemoryAllocatorDump::Flags::kDefault, shared_mad1->flags());
}

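// Verifies that CreateSharedMemoryOwnershipEdge() wires both the client-to-
// local and the local-to-global edges for a shared memory segment, with the
// given importance and no longer overridable.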
TEST(ProcessMemoryDumpTest, SharedMemoryOwnershipTest) {
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(kDetailedDumpArgs));
  const ProcessMemoryDump::AllocatorDumpEdgesMap& edges =
      pmd->allocator_dumps_edges();

  auto* client_dump2 = pmd->CreateAllocatorDump("discardable/segment2");
  auto shm_token2 = UnguessableToken::Create();
  MemoryAllocatorDumpGuid shm_local_guid2 =
      pmd->GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shm_token2));
  MemoryAllocatorDumpGuid shm_global_guid2 =
      SharedMemoryTracker::GetGlobalDumpIdForTracing(shm_token2);
  pmd->AddOverridableOwnershipEdge(shm_local_guid2, shm_global_guid2,
                                   0 /* importance */);

  pmd->CreateSharedMemoryOwnershipEdge(client_dump2->guid(), shm_token2,
                                       1 /* importance */);
  EXPECT_EQ(2u, edges.size());

  EXPECT_EQ(shm_global_guid2, edges.find(shm_local_guid2)->second.target);
  EXPECT_EQ(1, edges.find(shm_local_guid2)->second.importance);
  EXPECT_FALSE(edges.find(shm_local_guid2)->second.overridable);
  EXPECT_EQ(shm_local_guid2, edges.find(client_dump2->guid())->second.target);
  EXPECT_EQ(1, edges.find(client_dump2->guid())->second.importance);
  EXPECT_FALSE(edges.find(client_dump2->guid())->second.overridable);
}

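// Verifies that in background mode only allowlisted dump names are created;
// everything else is routed to the shared "black hole" dump.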
TEST(ProcessMemoryDumpTest, BackgroundModeTest) {
  MemoryDumpArgs background_args = {MemoryDumpLevelOfDetail::kBackground};
  std::unique_ptr<ProcessMemoryDump> pmd(
      new ProcessMemoryDump(background_args));
  ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = true;
  SetAllocatorDumpNameAllowlistForTesting(kTestDumpNameAllowlist);
  MemoryAllocatorDump* black_hole_mad = pmd->GetBlackHoleMad(std::string());

  // GetAllocatorDump() returns nullptr for dumps that have not been created.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("Allowlisted/TestName"));

  // Invalid dump names.
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("TestName"));
  EXPECT_EQ(black_hole_mad, pmd->CreateAllocatorDump("Allowlisted/Test"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Not/Allowlisted/TestName"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/0x1a2Google"));
  EXPECT_EQ(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName/__12/Google"));

  // Suballocations.
  MemoryAllocatorDumpGuid guid(1);
  pmd->AddSuballocation(guid, "malloc/allocated_objects");
  EXPECT_EQ(0u, pmd->allocator_dumps_edges_.size());
  EXPECT_EQ(0u, pmd->allocator_dumps_.size());

  // Global dumps.
  EXPECT_NE(black_hole_mad, pmd->CreateSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->CreateWeakSharedGlobalAllocatorDump(guid));
  EXPECT_NE(black_hole_mad, pmd->GetSharedGlobalAllocatorDump(guid));

  // Valid dump names.
  EXPECT_NE(black_hole_mad, pmd->CreateAllocatorDump("Allowlisted/TestName"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/TestName_0xA1b2"));
  EXPECT_NE(black_hole_mad,
            pmd->CreateAllocatorDump("Allowlisted/0xaB/TestName"));

  // GetAllocatorDump() is consistent with the creations above.
  EXPECT_EQ(nullptr, pmd->GetAllocatorDump("NotAllowlisted/TestName"));
  EXPECT_NE(black_hole_mad, pmd->GetAllocatorDump("Allowlisted/TestName"));

  // Test allowed entries.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Allowlisted/TestName"));

  // Global dumps should be allowed.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("global/13456"));

  // Global dumps with non-guids should not be allowed.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("global/random"));

  // Random names should not be allowed either.
  ASSERT_FALSE(IsMemoryAllocatorDumpNameInAllowlist("NotAllowlisted/TestName"));

  // Check hex processing.
  ASSERT_TRUE(IsMemoryAllocatorDumpNameInAllowlist("Allowlisted/0xA1b2"));
}

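// Verifies that allocator dump GUIDs are derived from the process token and
// the dump name: the same token and name collide, different tokens do not.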
TEST(ProcessMemoryDumpTest, GuidsTest) {
  MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::kDetailed};

  const auto process_token_one = UnguessableToken::Create();
  const auto process_token_two = UnguessableToken::Create();

  ProcessMemoryDump pmd1(dump_args);
  pmd1.set_process_token_for_testing(process_token_one);
  MemoryAllocatorDump* mad1 = pmd1.CreateAllocatorDump("foo");

  ProcessMemoryDump pmd2(dump_args);
  pmd2.set_process_token_for_testing(process_token_one);
  MemoryAllocatorDump* mad2 = pmd2.CreateAllocatorDump("foo");

  // If we don't set a process token explicitly, the PMD gets a random one:
  ProcessMemoryDump pmd3(dump_args);
  MemoryAllocatorDump* mad3 = pmd3.CreateAllocatorDump("foo");

  // PMDs for different processes produce different GUIDs even for the same
  // names:
  ProcessMemoryDump pmd4(dump_args);
  pmd4.set_process_token_for_testing(process_token_two);
  MemoryAllocatorDump* mad4 = pmd4.CreateAllocatorDump("foo");

  ASSERT_EQ(mad1->guid(), mad2->guid());

  ASSERT_NE(mad2->guid(), mad3->guid());
  ASSERT_NE(mad3->guid(), mad4->guid());
  ASSERT_NE(mad4->guid(), mad2->guid());

  ASSERT_EQ(mad1->guid(), pmd1.GetDumpId("foo"));
}

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/42050620): Counting resident bytes is not supported on
// Fuchsia.
#define MAYBE_CountResidentBytes DISABLED_CountResidentBytes
#else
#define MAYBE_CountResidentBytes CountResidentBytes
#endif
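// Verifies that CountResidentBytes() reports fully-touched private mappings
// as entirely resident, for both small and large (> 8 MiB) segments.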
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytes) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // Allocate a few pages of dirty memory and check that they are resident.
  const size_t size1 = 5 * page_size;
  void* memory1 = Map(size1);
  memset(memory1, 0, size1);
  std::optional<size_t> res1 =
      ProcessMemoryDump::CountResidentBytes(memory1, size1);
  ASSERT_TRUE(res1.has_value());
  ASSERT_EQ(res1.value(), size1);
  Unmap(memory1, size1);

  // Allocate a large memory segment (> 8 MiB).
  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
  void* memory2 = Map(kVeryLargeMemorySize);
  memset(memory2, 0, kVeryLargeMemorySize);
  std::optional<size_t> res2 =
      ProcessMemoryDump::CountResidentBytes(memory2, kVeryLargeMemorySize);
  ASSERT_TRUE(res2.has_value());
  ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
  Unmap(memory2, kVeryLargeMemorySize);
}

#if BUILDFLAG(IS_FUCHSIA)
// TODO(crbug.com/42050620): Counting resident bytes is not supported on
// Fuchsia.
#define MAYBE_CountResidentBytesInSharedMemory \
  DISABLED_CountResidentBytesInSharedMemory
#else
#define MAYBE_CountResidentBytesInSharedMemory CountResidentBytesInSharedMemory
#endif
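// Verifies resident-byte counting for shared memory mappings, including a
// mapping at a non-page-aligned offset and segments larger than 8 MiB.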
TEST(ProcessMemoryDumpTest, MAYBE_CountResidentBytesInSharedMemory) {
  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();

  // Allocate a few pages of dirty memory and check that they are resident.
  {
    const size_t kDirtyMemorySize = 5 * page_size;
    auto region = base::WritableSharedMemoryRegion::Create(kDirtyMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    base::span<uint8_t> mapping_mem(mapping);
    std::ranges::fill(mapping_mem, 0u);
    std::optional<size_t> res1 = CountResidentBytesInSharedMemory(mapping);
    ASSERT_TRUE(res1.has_value());
    ASSERT_EQ(res1.value(), kDirtyMemorySize);
  }

  // Allocate a shared memory segment but map it at a non-page-aligned offset.
  {
    const size_t kDirtyMemorySize = 5 * page_size;
    auto region =
        base::WritableSharedMemoryRegion::Create(kDirtyMemorySize + page_size);
    base::WritableSharedMemoryMapping mapping =
        region.MapAt(page_size / 2, kDirtyMemorySize);
    base::span<uint8_t> mapping_mem(mapping);
    std::ranges::fill(mapping_mem, 0u);
    std::optional<size_t> res1 = CountResidentBytesInSharedMemory(mapping);
    ASSERT_TRUE(res1.has_value());
    // The counted range is rounded out to page boundaries, so one extra page
    // is reported.
    ASSERT_EQ(res1.value(), kDirtyMemorySize + page_size);
  }

  // Allocate a large memory segment (> 8 MiB).
  {
    const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
    auto region =
        base::WritableSharedMemoryRegion::Create(kVeryLargeMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    base::span<uint8_t> mapping_mem(mapping);
    std::ranges::fill(mapping_mem, 0u);
    std::optional<size_t> res2 = CountResidentBytesInSharedMemory(mapping);
    ASSERT_TRUE(res2.has_value());
    ASSERT_EQ(res2.value(), kVeryLargeMemorySize);
  }

  // Allocate a medium-sized memory segment (7 MiB) and touch all of its
  // pages.
  {
    const size_t kTouchedMemorySize = 7 * 1024 * 1024;
    auto region = base::WritableSharedMemoryRegion::Create(kTouchedMemorySize);
    base::WritableSharedMemoryMapping mapping = region.Map();
    base::span<uint8_t> mapping_mem(mapping);
    std::ranges::fill(mapping_mem, 0u);
    std::optional<size_t> res3 = CountResidentBytesInSharedMemory(mapping);
    ASSERT_TRUE(res3.has_value());
    ASSERT_EQ(res3.value(), kTouchedMemorySize);
  }
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)

}  // namespace base::trace_event
588