1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include <atomic>
#include <string>
#include <thread>
#include <vector>
20
#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
27
28 #include "perfetto/base/build_config.h"
29 #include "perfetto/base/logging.h"
30 #include "perfetto/ext/base/file_utils.h"
31 #include "perfetto/ext/base/optional.h"
32 #include "perfetto/ext/base/pipe.h"
33 #include "perfetto/ext/base/string_utils.h"
34 #include "perfetto/ext/base/subprocess.h"
35 #include "perfetto/ext/tracing/ipc/default_socket.h"
36 #include "perfetto/heap_profile.h"
37 #include "perfetto/trace_processor/trace_processor.h"
38 #include "protos/perfetto/trace/trace.gen.h"
39 #include "protos/perfetto/trace/trace.pbzero.h"
40 #include "src/base/test/test_task_runner.h"
41 #include "src/profiling/memory/heapprofd_producer.h"
42 #include "test/gtest_and_gmock.h"
43 #include "test/test_helper.h"
44
45 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
46 #include <sys/system_properties.h>
47 #endif
48
49 #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
50 #include "protos/perfetto/trace/interned_data/interned_data.gen.h"
51 #include "protos/perfetto/trace/profiling/profile_common.gen.h"
52 #include "protos/perfetto/trace/profiling/profile_packet.gen.h"
53
54 namespace perfetto {
55 namespace profiling {
56 namespace {
57
58 constexpr useconds_t kMsToUs = 1000;
59
60 constexpr auto kTracingDisabledTimeoutMs = 30000;
61 constexpr auto kWaitForReadDataTimeoutMs = 10000;
62 constexpr size_t kStartupAllocSize = 10;
63 constexpr size_t kFirstIterationBytes = 5;
64 constexpr size_t kSecondIterationBytes = 7;
65
66 enum class TestMode { kCentral, kStatic };
67 enum class AllocatorMode { kMalloc, kCustom };
68
69 using ::testing::AllOf;
70 using ::testing::AnyOf;
71 using ::testing::Bool;
72 using ::testing::Contains;
73 using ::testing::Eq;
74 using ::testing::Field;
75 using ::testing::HasSubstr;
76 using ::testing::Values;
77
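// Trace-processor query used to validate the profiles: selects the native
// heap-profile flamegraph nodes for every (ts, upid) that has allocation
// data, ordered by descending |cumulative_size|.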
78 constexpr const char* kOnlyFlamegraph =
79 "SELECT id, name, map_name, count, cumulative_count, size, "
80 "cumulative_size, "
81 "alloc_count, cumulative_alloc_count, alloc_size, cumulative_alloc_size, "
82 "parent_id "
83 "FROM experimental_flamegraph WHERE "
84 "(ts, upid) IN (SELECT distinct ts, upid from heap_profile_allocation) AND "
85 "profile_type = 'native' order by abs(cumulative_size) desc;";
86
87 struct FlamegraphNode {
88 int64_t id;
89 std::string name;
90 std::string map_name;
91 int64_t count;
92 int64_t cumulative_count;
93 int64_t size;
94 int64_t cumulative_size;
95 int64_t alloc_count;
96 int64_t cumulative_alloc_count;
97 int64_t alloc_size;
98 int64_t cumulative_alloc_size;
99 base::Optional<int64_t> parent_id;
100 };
101
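// Runs kOnlyFlamegraph on the given TraceProcessor and materializes every
// returned row into a FlamegraphNode.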
std::vector<FlamegraphNode> GetFlamegraph(trace_processor::TraceProcessor* tp) {
103 std::vector<FlamegraphNode> result;
104 auto it = tp->ExecuteQuery(kOnlyFlamegraph);
105 while (it.Next()) {
106 result.push_back({
107 it.Get(0).AsLong(),
108 it.Get(1).AsString(),
109 it.Get(2).AsString(),
110 it.Get(3).AsLong(),
111 it.Get(4).AsLong(),
112 it.Get(5).AsLong(),
113 it.Get(6).AsLong(),
114 it.Get(7).AsLong(),
115 it.Get(8).AsLong(),
116 it.Get(9).AsLong(),
117 it.Get(10).AsLong(),
118 it.Get(11).is_null() ? base::nullopt
119 : base::Optional<int64_t>(it.Get(11).AsLong()),
120 });
121 }
122 PERFETTO_CHECK(it.Status().ok());
123 return result;
124 }
125
std::string AllocatorName(AllocatorMode mode) {
127 switch (mode) {
128 case AllocatorMode::kMalloc:
129 return "libc.malloc";
130 case AllocatorMode::kCustom:
131 return "test";
132 }
133 }
134
AllocatorMode AllocatorModeFromNameOrDie(std::string s) {
136 if (s == "libc.malloc")
137 return AllocatorMode::kMalloc;
138 if (s == "test")
139 return AllocatorMode::kCustom;
  PERFETTO_FATAL("Invalid allocator mode [libc.malloc | test]: %s",
                 s.c_str());
141 }
142
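// Enables continuous dumping: the first dump happens immediately (phase 0),
// subsequent dumps every 100ms.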
void ContinuousDump(HeapprofdConfig* cfg) {
144 auto* cont_config = cfg->mutable_continuous_dump_config();
145 cont_config->set_dump_phase_ms(0);
146 cont_config->set_dump_interval_ms(100);
147 }
148
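// Builds a TraceConfig with a single "android.heapprofd" data source; the
// caller-provided |fn| fills in the HeapprofdConfig, which is attached in
// serialized form.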
149 template <typename F>
TraceConfig MakeTraceConfig(F fn) {
151 TraceConfig trace_config;
152 trace_config.add_buffers()->set_size_kb(10 * 1024);
153 trace_config.set_duration_ms(2000);
154 trace_config.set_data_source_stop_timeout_ms(10000);
155
156 auto* ds_config = trace_config.add_data_sources()->mutable_config();
157 ds_config->set_name("android.heapprofd");
158 ds_config->set_target_buffer(0);
159
160 protos::gen::HeapprofdConfig heapprofd_config;
161 fn(&heapprofd_config);
162 ds_config->set_heapprofd_config_raw(heapprofd_config.SerializeAsString());
163 return trace_config;
164 }
165
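// Reports a fake allocation (and matching free) of |bytes| at a fixed address
// on the custom "test" heap, registering the heap on first use.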
void CustomAllocateAndFree(size_t bytes) {
167 static uint32_t heap_id = AHeapProfile_registerHeap(AHeapInfo_create("test"));
168 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
169 AHeapProfile_reportFree(heap_id, 0x1234abc);
170 }
171
void SecondaryAllocAndFree(size_t bytes) {
173 static uint32_t heap_id =
174 AHeapProfile_registerHeap(AHeapInfo_create("secondary"));
175 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
176 AHeapProfile_reportFree(heap_id, 0x1234abc);
177 }
178
void AllocateAndFree(size_t bytes) {
180 // This volatile is needed to prevent the compiler from trying to be
181 // helpful and compiling a "useless" malloc + free into a noop.
182 volatile char* x = static_cast<char*>(malloc(bytes));
183 if (x) {
184 if (bytes > 0)
185 x[0] = 'x';
186 free(const_cast<char*>(x));
187 }
188 }
189
void DoAllocation(AllocatorMode mode, size_t bytes) {
191 switch (mode) {
192 case AllocatorMode::kMalloc:
193 AllocateAndFree(bytes);
194 break;
195 case AllocatorMode::kCustom:
      // We need to run a malloc even if we only want to test the custom
      // allocator, as the init mechanism assumes the application uses malloc.
198 AllocateAndFree(1);
199 CustomAllocateAndFree(bytes);
200 break;
201 }
202 }
203
void ContinuousMalloc(AllocatorMode mode,
                      size_t primary_bytes,
                      size_t secondary_bytes,
                      ssize_t max_iter = -1) {
208 for (ssize_t i = 0; max_iter == -1 || i < max_iter; ++i) {
209 DoAllocation(mode, primary_bytes);
210 if (secondary_bytes)
211 SecondaryAllocAndFree(secondary_bytes);
212 usleep(10 * kMsToUs);
213 }
214 }
215
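// Starts |child| and blocks until it has completed the readiness handshake,
// i.e. until the child closes the inherited write end of the ready pipe in
// ChildFinishHandshake().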
void StartAndWaitForHandshake(base::Subprocess* child) {
  // We cannot use base::Pipe because that assumes we want CLOEXEC.
  // We do NOT want CLOEXEC as this fd gets used by ChildFinishHandshake in
  // the child.
219 int ready_pipe[2];
220 PERFETTO_CHECK(pipe(ready_pipe) == 0); // NOLINT(android-cloexec-pipe)
221
222 int ready_pipe_rd = ready_pipe[0];
223 int ready_pipe_wr = ready_pipe[1];
224 child->args.preserve_fds.push_back(ready_pipe_wr);
225 child->args.env.push_back("HEAPPROFD_TESTING_READY_PIPE=" +
226 std::to_string(ready_pipe_wr));
227 child->Start();
228 close(ready_pipe_wr);
  // Wait for libc to install the signal handler. If we sent the signal before
  // the handler is installed, we could kill the process.
231 char buf[1];
232 PERFETTO_CHECK(PERFETTO_EINTR(read(ready_pipe_rd, buf, sizeof(buf))) == 0);
233 close(ready_pipe_rd);
234 }
235
void ChildFinishHandshake() {
237 const char* ready_pipe = getenv("HEAPPROFD_TESTING_READY_PIPE");
238 if (ready_pipe != nullptr) {
239 close(static_cast<int>(base::StringToInt64(ready_pipe).value()));
240 }
241 }
242
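// Re-executes this test binary as a child that runs RunContinuousMalloc with
// the given allocator mode and allocation sizes, then waits for its readiness
// handshake.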
base::Subprocess ForkContinuousAlloc(AllocatorMode mode,
                                     size_t primary_bytes,
                                     size_t secondary_bytes = 0,
                                     ssize_t max_iter = -1) {
247 base::Subprocess child({"/proc/self/exe"});
248 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
249 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
250 AllocatorName(mode));
251 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
252 std::to_string(primary_bytes));
253 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
254 std::to_string(secondary_bytes));
255 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
256 std::to_string(max_iter));
257
258 StartAndWaitForHandshake(&child);
259 return child;
260 }
261
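// The Run* functions below are the entry points for the child processes. They
// are registered as static constructors, so they execute whenever this binary
// starts; each returns immediately unless its HEAPPROFD_TESTING_* environment
// variable is set.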
void __attribute__((constructor(1024))) RunContinuousMalloc() {
263 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG0");
264 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG1");
265 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG2");
266 const char* a3 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG3");
267 if (a0 == nullptr)
268 return;
269
270 AllocatorMode arg0 = AllocatorModeFromNameOrDie(a0);
271 uint32_t arg1 = a1 ? base::StringToUInt32(a1).value() : 0;
272 uint32_t arg2 = a2 ? base::StringToUInt32(a2).value() : 0;
273 int32_t arg3 = a3 ? base::StringToInt32(a3).value() : -1;
274
275 ChildFinishHandshake();
276
277 ContinuousMalloc(arg0, arg1, arg2, arg3);
278 exit(0);
279 }
280
void __attribute__((constructor(1024))) RunAccurateMalloc() {
282 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC");
283 if (a0 == nullptr)
284 return;
285
286 static std::atomic<bool> initialized{false};
287 static uint32_t heap_id =
288 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
289 AHeapInfo_create("test"),
290 [](void*, const AHeapProfileEnableCallbackInfo*) {
291 initialized = true;
292 },
293 nullptr));
294
295 ChildFinishHandshake();
296
297 // heapprofd_client needs malloc to see the signal.
298 while (!initialized)
299 AllocateAndFree(1);
300 // We call the callback before setting enabled=true on the heap, so we
301 // wait a bit for the assignment to happen.
302 usleep(100000);
303 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
304 PERFETTO_FATAL("Expected allocation to be sampled.");
305 AHeapProfile_reportFree(heap_id, 0x1);
306 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
307 PERFETTO_FATAL("Expected allocation to be sampled.");
308 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
309 PERFETTO_FATAL("Expected allocation to be sampled.");
310 AHeapProfile_reportFree(heap_id, 0x2);
311
  // Wait around so we can verify it didn't crash.
313 for (;;) {
314 }
315 }
316
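// Shared body of the vfork tests: after the handshake, reports allocations,
// vforks a child that reports further allocations on the same heap and exits,
// then keeps reporting from the parent. The expectations in the tests imply
// that the child's reports must not be double-counted in the parent's profile.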
void __attribute__((noreturn)) RunAccurateMallocWithVforkCommon() {
318 static std::atomic<bool> initialized{false};
319 static uint32_t heap_id =
320 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
321 AHeapInfo_create("test"),
322 [](void*, const AHeapProfileEnableCallbackInfo*) {
323 initialized = true;
324 },
325 nullptr));
326
327 ChildFinishHandshake();
328
329 // heapprofd_client needs malloc to see the signal.
330 while (!initialized)
331 AllocateAndFree(1);
332 // We call the callback before setting enabled=true on the heap, so we
333 // wait a bit for the assignment to happen.
334 usleep(100000);
335 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
336 PERFETTO_FATAL("Expected allocation to be sampled.");
337 AHeapProfile_reportFree(heap_id, 0x1);
338 pid_t pid = vfork();
339 PERFETTO_CHECK(pid != -1);
340 if (pid == 0) {
341 AHeapProfile_reportAllocation(heap_id, 0x2, 15u);
342 AHeapProfile_reportAllocation(heap_id, 0x3, 15u);
343 exit(0);
344 }
345 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
346 PERFETTO_FATAL("Expected allocation to be sampled.");
347 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
348 PERFETTO_FATAL("Expected allocation to be sampled.");
349 AHeapProfile_reportFree(heap_id, 0x2);
350
  // Wait around so we can verify it didn't crash.
352 for (;;) {
353 }
354 }
355
void __attribute__((constructor(1024))) RunAccurateSample() {
357 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE");
358 if (a0 == nullptr)
359 return;
360
361 static std::atomic<bool> initialized{false};
362 static uint32_t heap_id =
363 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
364 AHeapInfo_create("test"),
365 [](void*, const AHeapProfileEnableCallbackInfo*) {
366 initialized = true;
367 },
368 nullptr));
369
370 ChildFinishHandshake();
371
372 // heapprofd_client needs malloc to see the signal.
373 while (!initialized)
374 AllocateAndFree(1);
375 // We call the callback before setting enabled=true on the heap, so we
376 // wait a bit for the assignment to happen.
377 usleep(100000);
378 if (!AHeapProfile_reportSample(heap_id, 0x1, 10u))
379 PERFETTO_FATAL("Expected allocation to be sampled.");
380 AHeapProfile_reportFree(heap_id, 0x1);
381 if (!AHeapProfile_reportSample(heap_id, 0x2, 15u))
382 PERFETTO_FATAL("Expected allocation to be sampled.");
383 if (!AHeapProfile_reportSample(heap_id, 0x3, 15u))
384 PERFETTO_FATAL("Expected allocation to be sampled.");
385 AHeapProfile_reportFree(heap_id, 0x2);
386
  // Wait around so we can verify it didn't crash.
388 for (;;) {
389 }
390 }
391
void __attribute__((constructor(1024))) RunAccurateMallocWithVfork() {
393 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK");
394 if (a0 == nullptr)
395 return;
396 RunAccurateMallocWithVforkCommon();
397 }
398
void __attribute__((constructor(1024))) RunAccurateMallocWithVforkThread() {
400 const char* a0 =
401 getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD");
402 if (a0 == nullptr)
403 return;
404 std::thread th(RunAccurateMallocWithVforkCommon);
405 th.join();
406 }
407
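// Child entry point for the ReInit tests: allocates kFirstIterationBytes per
// iteration until signalled over a pipe, then acknowledges and switches to
// kSecondIterationBytes.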
void __attribute__((constructor(1024))) RunReInit() {
409 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG0");
410 if (a0 == nullptr)
411 return;
412
413 AllocatorMode mode = AllocatorModeFromNameOrDie(a0);
414 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG1");
415 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG2");
416 PERFETTO_CHECK(a1 != nullptr && a2 != nullptr);
417 int signal_pipe_rd = static_cast<int>(base::StringToInt64(a1).value());
418 int ack_pipe_wr = static_cast<int>(base::StringToInt64(a2).value());
419
420 ChildFinishHandshake();
421
422 size_t bytes = kFirstIterationBytes;
423 bool signalled = false;
424 for (;;) {
425 DoAllocation(mode, bytes);
426 char buf[1];
427 if (!signalled && read(signal_pipe_rd, buf, sizeof(buf)) == 1) {
428 signalled = true;
429 close(signal_pipe_rd);
430
      // Make sure the client has noticed that the session has stopped.
432 DoAllocation(mode, bytes);
433
434 bytes = kSecondIterationBytes;
435 PERFETTO_CHECK(PERFETTO_EINTR(write(ack_pipe_wr, "1", 1)) == 1);
436 close(ack_pipe_wr);
437 }
438 usleep(10 * kMsToUs);
439 }
440 PERFETTO_FATAL("Should be unreachable");
441 }
442
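// Child entry point for the CustomLifetime test: verifies the sampling
// interval received via the enabled callback (ARG0) and, once the disabled
// callback has fired, reports back on the pipe passed via ARG1.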
void __attribute__((constructor(1024))) RunCustomLifetime() {
444 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0");
445 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1");
446 if (a0 == nullptr)
447 return;
448 uint64_t arg0 = a0 ? base::StringToUInt64(a0).value() : 0;
  uint64_t arg1 = a1 ? base::StringToUInt64(a1).value() : 0;
450
451 PERFETTO_CHECK(arg1);
452
453 static std::atomic<bool> initialized{false};
454 static std::atomic<bool> disabled{false};
455 static std::atomic<uint64_t> sampling_interval;
456
457 auto enabled_callback = [](void*,
458 const AHeapProfileEnableCallbackInfo* info) {
459 sampling_interval =
460 AHeapProfileEnableCallbackInfo_getSamplingInterval(info);
461 initialized = true;
462 };
463 auto disabled_callback = [](void*, const AHeapProfileDisableCallbackInfo*) {
464 disabled = true;
465 };
466 static uint32_t heap_id =
467 AHeapProfile_registerHeap(AHeapInfo_setDisabledCallback(
468 AHeapInfo_setEnabledCallback(AHeapInfo_create("test"),
469 enabled_callback, nullptr),
470 disabled_callback, nullptr));
471
472 ChildFinishHandshake();
473
474 // heapprofd_client needs malloc to see the signal.
475 while (!initialized)
476 AllocateAndFree(1);
477
478 if (sampling_interval.load() != arg0) {
479 PERFETTO_FATAL("%" PRIu64 " != %" PRIu64, sampling_interval.load(), arg0);
480 }
481
482 while (!disabled)
483 AHeapProfile_reportFree(heap_id, 0x2);
484
485 char x = 'x';
486 PERFETTO_CHECK(base::WriteAll(static_cast<int>(arg1), &x, sizeof(x)) == 1);
487 close(static_cast<int>(arg1));
488
489 // Wait around so we can verify it didn't crash.
490 for (;;) {
491 }
492 }
493
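// TestHelper that additionally feeds every received trace packet into an
// in-memory TraceProcessor instance, so tests can run SQL queries (e.g.
// kOnlyFlamegraph) against the collected profile.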
494 class TraceProcessorTestHelper : public TestHelper {
495 public:
  explicit TraceProcessorTestHelper(base::TestTaskRunner* task_runner)
      : TestHelper(task_runner),
        tp_(trace_processor::TraceProcessor::CreateInstance({})) {}
499
  void ReadTraceData(std::vector<TracePacket> packets) override {
501 for (auto& packet : packets) {
502 auto preamble = packet.GetProtoPreamble();
503 std::string payload = packet.GetRawBytesForTesting();
504 char* preamble_payload = std::get<0>(preamble);
505 size_t preamble_size = std::get<1>(preamble);
506 size_t buf_size = preamble_size + payload.size();
507 std::unique_ptr<uint8_t[]> buf =
508 std::unique_ptr<uint8_t[]>(new uint8_t[buf_size]);
509 memcpy(&buf[0], preamble_payload, preamble_size);
510 memcpy(&buf[preamble_size], payload.data(), payload.size());
511 PERFETTO_CHECK(tp_->Parse(std::move(buf), buf_size).ok());
512 }
513 TestHelper::ReadTraceData(std::move(packets));
514 }
515
  trace_processor::TraceProcessor& tp() { return *tp_; }
517
518 private:
519 std::unique_ptr<trace_processor::TraceProcessor> tp_;
520 };
521
std::unique_ptr<TraceProcessorTestHelper> GetHelper(
    base::TestTaskRunner* task_runner) {
524 std::unique_ptr<TraceProcessorTestHelper> helper(
525 new TraceProcessorTestHelper(task_runner));
526 helper->StartServiceIfRequired();
527
528 helper->ConnectConsumer();
529 helper->WaitForConsumerConnect();
530 return helper;
531 }
532
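// Waits for the tracing session to stop, reads the trace data back and
// finalizes the TraceProcessor ingestion.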
void ReadAndWait(TraceProcessorTestHelper* helper) {
534 helper->WaitForTracingDisabled(kTracingDisabledTimeoutMs);
535 helper->ReadData();
536 helper->WaitForReadData(0, kWaitForReadDataTimeoutMs);
537 helper->tp().NotifyEndOfFile();
538 }
539
std::string ToTraceString(
    const std::vector<protos::gen::TracePacket>& packets) {
542 protos::gen::Trace trace;
543 for (const protos::gen::TracePacket& packet : packets) {
544 *trace.add_packet() = packet;
545 }
546 return trace.SerializeAsString();
547 }
548
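// Writes the trace via the fixture's WriteTrace(), annotating it with the
// call site; traces end up under $HEAPPROFD_TEST_PROFILE_OUT (if set) for
// offline inspection.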
549 #define WRITE_TRACE(trace) \
550 do { \
551 WriteTrace(trace, __FILE__, __LINE__); \
552 } while (0)
553
std::string FormatHistogram(const protos::gen::ProfilePacket_Histogram& hist) {
555 std::string out;
556 std::string prev_upper_limit = "-inf";
557 for (const auto& bucket : hist.buckets()) {
558 std::string upper_limit;
559 if (bucket.max_bucket())
560 upper_limit = "inf";
561 else
562 upper_limit = std::to_string(bucket.upper_limit());
563
564 out += "[" + prev_upper_limit + ", " + upper_limit +
565 "]: " + std::to_string(bucket.count()) + "; ";
566 prev_upper_limit = std::move(upper_limit);
567 }
568 return out + "\n";
569 }
570
std::string FormatStats(const protos::gen::ProfilePacket_ProcessStats& stats) {
572 return std::string("unwinding_errors: ") +
573 std::to_string(stats.unwinding_errors()) + "\n" +
574 "heap_samples: " + std::to_string(stats.heap_samples()) + "\n" +
575 "map_reparses: " + std::to_string(stats.map_reparses()) + "\n" +
576 "unwinding_time_us: " + FormatHistogram(stats.unwinding_time_us());
577 }
578
std::string Suffix(const std::tuple<TestMode, AllocatorMode>& param) {
580 TestMode tm = std::get<0>(param);
581 AllocatorMode am = std::get<1>(param);
582
583 std::string result;
584 switch (tm) {
585 case TestMode::kCentral:
586 result += "CentralMode";
587 break;
588 case TestMode::kStatic:
589 result += "StaticMode";
590 break;
591 }
592 switch (am) {
593 case AllocatorMode::kMalloc:
594 result += "Malloc";
595 break;
596 case AllocatorMode::kCustom:
597 result += "Custom";
598 break;
599 }
600 return result;
601 }
602
__attribute__((unused)) std::string TestSuffix(
    const ::testing::TestParamInfo<std::tuple<TestMode, AllocatorMode>>& info) {
605 return Suffix(info.param);
606 }
607
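// End-to-end test fixture, parameterized over the heapprofd test mode
// (TestMode) and the profiled allocator (AllocatorMode).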
608 class HeapprofdEndToEnd
609 : public ::testing::TestWithParam<std::tuple<TestMode, AllocatorMode>> {
610 protected:
611 base::TestTaskRunner task_runner;
612
  TestMode test_mode() { return std::get<0>(GetParam()); }
  AllocatorMode allocator_mode() { return std::get<1>(GetParam()); }
  std::string allocator_name() { return AllocatorName(allocator_mode()); }
616
  void WriteTrace(const std::vector<protos::gen::TracePacket>& packets,
                  const char* filename,
                  uint64_t lineno) {
620 const char* outdir = getenv("HEAPPROFD_TEST_PROFILE_OUT");
621 if (!outdir)
622 return;
623 const std::string fq_filename =
624 std::string(outdir) + "/" + basename(filename) + ":" +
625 std::to_string(lineno) + "_" + Suffix(GetParam());
626 base::ScopedFile fd(base::OpenFile(fq_filename, O_WRONLY | O_CREAT, 0666));
627 PERFETTO_CHECK(*fd);
628 std::string trace_string = ToTraceString(packets);
629 PERFETTO_CHECK(
630 base::WriteAll(*fd, trace_string.data(), trace_string.size()) >= 0);
631 }
632
  std::unique_ptr<TraceProcessorTestHelper> Trace(
      const TraceConfig& trace_config) {
635 auto helper = GetHelper(&task_runner);
636
637 helper->StartTracing(trace_config);
638
639 ReadAndWait(helper.get());
640 return helper;
641 }
642
  std::vector<std::string> GetUnwindingErrors(
      TraceProcessorTestHelper* helper) {
645 std::vector<std::string> out;
646 const auto& packets = helper->trace();
647 for (const protos::gen::TracePacket& packet : packets) {
648 for (const protos::gen::InternedString& fn :
649 packet.interned_data().function_names()) {
650 if (fn.str().find("ERROR ") == 0) {
651 out.push_back(fn.str());
652 }
653 }
654 }
655 return out;
656 }
657
  void PrintStats(TraceProcessorTestHelper* helper) {
659 const auto& packets = helper->trace();
660 for (const protos::gen::TracePacket& packet : packets) {
661 for (const auto& dump : packet.profile_packet().process_dumps()) {
662 // protobuf uint64 does not like the PRIu64 formatter.
663 PERFETTO_LOG("Stats for %s: %s", std::to_string(dump.pid()).c_str(),
664 FormatStats(dump.stats()).c_str());
665 }
666 }
667 std::vector<std::string> errors = GetUnwindingErrors(helper);
668 for (const std::string& err : errors) {
669 PERFETTO_LOG("Unwinding error: %s", err.c_str());
670 }
671 }
672
  void ValidateSampleSizes(TraceProcessorTestHelper* helper,
                           uint64_t pid,
                           uint64_t alloc_size,
                           const std::string& heap_name = "") {
677 const auto& packets = helper->trace();
678 for (const protos::gen::TracePacket& packet : packets) {
679 for (const auto& dump : packet.profile_packet().process_dumps()) {
680 if (dump.pid() != pid ||
681 (!heap_name.empty() && heap_name != dump.heap_name())) {
682 continue;
683 }
684 for (const auto& sample : dump.samples()) {
685 EXPECT_EQ(sample.self_allocated() % alloc_size, 0u);
686 EXPECT_EQ(sample.self_freed() % alloc_size, 0u);
687 EXPECT_THAT(sample.self_allocated() - sample.self_freed(),
688 AnyOf(Eq(0u), Eq(alloc_size)));
689 }
690 }
691 }
692 }
693
  void ValidateFromStartup(TraceProcessorTestHelper* helper,
                           uint64_t pid,
                           bool from_startup) {
697 const auto& packets = helper->trace();
698 for (const protos::gen::TracePacket& packet : packets) {
699 for (const auto& dump : packet.profile_packet().process_dumps()) {
700 if (dump.pid() != pid)
701 continue;
702 EXPECT_EQ(dump.from_startup(), from_startup);
703 }
704 }
705 }
706
  void ValidateRejectedConcurrent(TraceProcessorTestHelper* helper,
                                  uint64_t pid,
                                  bool rejected_concurrent) {
710 const auto& packets = helper->trace();
711 for (const protos::gen::TracePacket& packet : packets) {
712 for (const auto& dump : packet.profile_packet().process_dumps()) {
713 if (dump.pid() != pid)
714 continue;
715 EXPECT_EQ(dump.rejected_concurrent(), rejected_concurrent);
716 }
717 }
718 }
719
  void ValidateNoSamples(TraceProcessorTestHelper* helper, uint64_t pid) {
721 const auto& packets = helper->trace();
722 size_t samples = 0;
723 for (const protos::gen::TracePacket& packet : packets) {
724 for (const auto& dump : packet.profile_packet().process_dumps()) {
725 if (dump.pid() != pid)
726 continue;
727 samples += dump.samples().size();
728 }
729 }
730 EXPECT_EQ(samples, 0u);
731 }
732
  void ValidateHasSamples(TraceProcessorTestHelper* helper,
                          uint64_t pid,
                          const std::string& heap_name,
                          uint64_t sampling_interval) {
737 const auto& packets = helper->trace();
738 ASSERT_GT(packets.size(), 0u);
739 size_t profile_packets = 0;
740 size_t samples = 0;
741 uint64_t last_allocated = 0;
742 uint64_t last_freed = 0;
743 for (const protos::gen::TracePacket& packet : packets) {
744 for (const auto& dump : packet.profile_packet().process_dumps()) {
745 if (dump.pid() != pid || dump.heap_name() != heap_name)
746 continue;
747 EXPECT_EQ(dump.sampling_interval_bytes(), sampling_interval);
748 for (const auto& sample : dump.samples()) {
749 last_allocated = sample.self_allocated();
750 last_freed = sample.self_freed();
751 samples++;
752 }
753 profile_packets++;
754 }
755 }
756 EXPECT_GT(profile_packets, 0u) << heap_name;
757 EXPECT_GT(samples, 0u) << heap_name;
758 EXPECT_GT(last_allocated, 0u) << heap_name;
759 EXPECT_GT(last_freed, 0u) << heap_name;
760 }
761
  void ValidateOnlyPID(TraceProcessorTestHelper* helper, uint64_t pid) {
763 size_t dumps = 0;
764 const auto& packets = helper->trace();
765 for (const protos::gen::TracePacket& packet : packets) {
766 for (const auto& dump : packet.profile_packet().process_dumps()) {
767 EXPECT_EQ(dump.pid(), pid);
768 dumps++;
769 }
770 }
771 EXPECT_GT(dumps, 0u);
772 }
773 };
774
// This checks that the child is still running (to ensure it didn't crash
// unexpectedly) and then kills it.
void KillAssertRunning(base::Subprocess* child) {
778 ASSERT_EQ(child->Poll(), base::Subprocess::kRunning)
779 << "Target process not running. CHECK CRASH LOGS.";
780 PERFETTO_LOG("Shutting down profile target.");
781 child->KillAndWaitForTermination();
782 }
783
TEST_P(HeapprofdEndToEnd, Disabled) {
785 constexpr size_t kAllocSize = 1024;
786
787 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
788 const uint64_t pid = static_cast<uint64_t>(child.pid());
789
790 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
791 cfg->set_sampling_interval_bytes(1);
792 cfg->add_pid(pid);
793 cfg->add_heaps("invalid");
794 ContinuousDump(cfg);
795 });
796
797 auto helper = Trace(trace_config);
798 WRITE_TRACE(helper->full_trace());
799 PrintStats(helper.get());
800 KillAssertRunning(&child);
801
802 ValidateNoSamples(helper.get(), pid);
803 }
804
TEST_P(HeapprofdEndToEnd, Smoke) {
806 constexpr size_t kAllocSize = 1024;
807 constexpr size_t kSamplingInterval = 1;
808
809 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
810 const uint64_t pid = static_cast<uint64_t>(child.pid());
811
812 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
813 cfg->set_sampling_interval_bytes(kSamplingInterval);
814 cfg->add_pid(pid);
815 cfg->add_heaps(allocator_name());
816 ContinuousDump(cfg);
817 });
818
819 auto helper = Trace(trace_config);
820 WRITE_TRACE(helper->full_trace());
821 PrintStats(helper.get());
822 KillAssertRunning(&child);
823
824 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
825 ValidateOnlyPID(helper.get(), pid);
826 ValidateSampleSizes(helper.get(), pid, kAllocSize);
827 }
828
TEST_P(HeapprofdEndToEnd, TwoAllocators) {
830 constexpr size_t kCustomAllocSize = 1024;
831 constexpr size_t kAllocSize = 7;
832 constexpr size_t kSamplingInterval = 1;
833
834 base::Subprocess child =
835 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
836 const uint64_t pid = static_cast<uint64_t>(child.pid());
837
838 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
839 cfg->set_sampling_interval_bytes(kSamplingInterval);
840 cfg->add_pid(pid);
841 cfg->add_heaps(allocator_name());
842 cfg->add_heaps("secondary");
843 ContinuousDump(cfg);
844 });
845
846 auto helper = Trace(trace_config);
847 WRITE_TRACE(helper->full_trace());
848 PrintStats(helper.get());
849 KillAssertRunning(&child);
850
851 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
852 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
853 ValidateOnlyPID(helper.get(), pid);
854 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
855 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
856 }
857
TEST_P(HeapprofdEndToEnd, TwoAllocatorsAll) {
859 constexpr size_t kCustomAllocSize = 1024;
860 constexpr size_t kAllocSize = 7;
861 constexpr size_t kSamplingInterval = 1;
862
863 base::Subprocess child =
864 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
865 const uint64_t pid = static_cast<uint64_t>(child.pid());
866
867 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
868 cfg->set_sampling_interval_bytes(kSamplingInterval);
869 cfg->add_pid(pid);
870 cfg->set_all_heaps(true);
871 ContinuousDump(cfg);
872 });
873
874 auto helper = Trace(trace_config);
875 WRITE_TRACE(helper->full_trace());
876 PrintStats(helper.get());
877 KillAssertRunning(&child);
878
879 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
880 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
881 ValidateOnlyPID(helper.get(), pid);
882 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
883 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
884 }
885
TEST_P(HeapprofdEndToEnd, AccurateCustomReportAllocation) {
887 if (allocator_mode() != AllocatorMode::kCustom)
888 GTEST_SKIP();
889
890 base::Subprocess child({"/proc/self/exe"});
891 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
892 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
893 StartAndWaitForHandshake(&child);
894
895 const uint64_t pid = static_cast<uint64_t>(child.pid());
896
897 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
898 cfg->set_sampling_interval_bytes(1);
899 cfg->add_pid(pid);
900 cfg->add_heaps("test");
901 });
902
903 auto helper = Trace(trace_config);
904 WRITE_TRACE(helper->full_trace());
905 PrintStats(helper.get());
906 KillAssertRunning(&child);
907
908 auto flamegraph = GetFlamegraph(&helper->tp());
909 EXPECT_THAT(flamegraph,
910 Contains(AllOf(
911 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
912 Field(&FlamegraphNode::cumulative_size, Eq(15)),
913 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
914
915 ValidateOnlyPID(helper.get(), pid);
916
917 size_t total_alloc = 0;
918 size_t total_freed = 0;
919 for (const protos::gen::TracePacket& packet : helper->trace()) {
920 for (const auto& dump : packet.profile_packet().process_dumps()) {
921 for (const auto& sample : dump.samples()) {
922 total_alloc += sample.self_allocated();
923 total_freed += sample.self_freed();
924 }
925 }
926 }
927 EXPECT_EQ(total_alloc, 40u);
928 EXPECT_EQ(total_freed, 25u);
929 }
930
931 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
932 #define MAYBE_AccurateCustomReportAllocationWithVfork \
933 AccurateCustomReportAllocationWithVfork
934 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
935 AccurateCustomReportAllocationWithVforkThread
936 #else
937 #define MAYBE_AccurateCustomReportAllocationWithVfork \
938 DISABLED_AccurateCustomReportAllocationWithVfork
939 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
940 DISABLED_AccurateCustomReportAllocationWithVforkThread
941 #endif
942
TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVfork) {
944 if (allocator_mode() != AllocatorMode::kCustom)
945 GTEST_SKIP();
946
947 base::Subprocess child({"/proc/self/exe"});
948 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
949 child.args.env.push_back(
950 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK=1");
951 StartAndWaitForHandshake(&child);
952
953 const uint64_t pid = static_cast<uint64_t>(child.pid());
954
955 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
956 cfg->set_sampling_interval_bytes(1);
957 cfg->add_pid(pid);
958 cfg->add_heaps("test");
959 });
960
961 auto helper = Trace(trace_config);
962 WRITE_TRACE(helper->full_trace());
963 PrintStats(helper.get());
964 KillAssertRunning(&child);
965
966 auto flamegraph = GetFlamegraph(&helper->tp());
967 EXPECT_THAT(flamegraph,
968 Contains(AllOf(
969 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
970 Field(&FlamegraphNode::cumulative_size, Eq(15)),
971 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
972
973 ValidateOnlyPID(helper.get(), pid);
974
975 size_t total_alloc = 0;
976 size_t total_freed = 0;
977 for (const protos::gen::TracePacket& packet : helper->trace()) {
978 for (const auto& dump : packet.profile_packet().process_dumps()) {
979 EXPECT_FALSE(dump.disconnected());
980 for (const auto& sample : dump.samples()) {
981 total_alloc += sample.self_allocated();
982 total_freed += sample.self_freed();
983 }
984 }
985 }
986 EXPECT_EQ(total_alloc, 40u);
987 EXPECT_EQ(total_freed, 25u);
988 }
989
TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVforkThread) {
991 if (allocator_mode() != AllocatorMode::kCustom)
992 GTEST_SKIP();
993
994 base::Subprocess child({"/proc/self/exe"});
995 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
996 child.args.env.push_back(
997 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD=1");
998 StartAndWaitForHandshake(&child);
999
1000 const uint64_t pid = static_cast<uint64_t>(child.pid());
1001
1002 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1003 cfg->set_sampling_interval_bytes(1);
1004 cfg->add_pid(pid);
1005 cfg->add_heaps("test");
1006 });
1007
1008 auto helper = Trace(trace_config);
1009 WRITE_TRACE(helper->full_trace());
1010 PrintStats(helper.get());
1011 KillAssertRunning(&child);
1012
1013 auto flamegraph = GetFlamegraph(&helper->tp());
1014 EXPECT_THAT(flamegraph,
1015 Contains(AllOf(
1016 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1017 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1018 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1019
1020 ValidateOnlyPID(helper.get(), pid);
1021
1022 size_t total_alloc = 0;
1023 size_t total_freed = 0;
1024 for (const protos::gen::TracePacket& packet : helper->trace()) {
1025 for (const auto& dump : packet.profile_packet().process_dumps()) {
1026 EXPECT_FALSE(dump.disconnected());
1027 for (const auto& sample : dump.samples()) {
1028 total_alloc += sample.self_allocated();
1029 total_freed += sample.self_freed();
1030 }
1031 }
1032 }
1033 EXPECT_EQ(total_alloc, 40u);
1034 EXPECT_EQ(total_freed, 25u);
1035 }
1036
TEST_P(HeapprofdEndToEnd, AccurateCustomReportSample) {
1038 if (allocator_mode() != AllocatorMode::kCustom)
1039 GTEST_SKIP();
1040
1041 base::Subprocess child({"/proc/self/exe"});
1042 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1043 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE=1");
1044 StartAndWaitForHandshake(&child);
1045
1046 const uint64_t pid = static_cast<uint64_t>(child.pid());
1047
1048 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1049 cfg->set_sampling_interval_bytes(1000000);
1050 cfg->add_pid(pid);
1051 cfg->add_heaps("test");
1052 });
1053
1054 auto helper = Trace(trace_config);
1055 WRITE_TRACE(helper->full_trace());
1056 PrintStats(helper.get());
1057 KillAssertRunning(&child);
1058
1059 ValidateOnlyPID(helper.get(), pid);
1060
1061 size_t total_alloc = 0;
1062 size_t total_freed = 0;
1063 for (const protos::gen::TracePacket& packet : helper->trace()) {
1064 for (const auto& dump : packet.profile_packet().process_dumps()) {
1065 for (const auto& sample : dump.samples()) {
1066 total_alloc += sample.self_allocated();
1067 total_freed += sample.self_freed();
1068 }
1069 }
1070 }
1071 EXPECT_EQ(total_alloc, 40u);
1072 EXPECT_EQ(total_freed, 25u);
1073 }
1074
TEST_P(HeapprofdEndToEnd, AccurateDumpAtMaxCustom) {
1076 if (allocator_mode() != AllocatorMode::kCustom)
1077 GTEST_SKIP();
1078
1079 base::Subprocess child({"/proc/self/exe"});
1080 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1081 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
1082 StartAndWaitForHandshake(&child);
1083
1084 const uint64_t pid = static_cast<uint64_t>(child.pid());
1085
1086 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1087 cfg->set_sampling_interval_bytes(1);
1088 cfg->add_pid(pid);
1089 cfg->add_heaps("test");
1090 cfg->set_dump_at_max(true);
1091 });
1092
1093 auto helper = Trace(trace_config);
1094 WRITE_TRACE(helper->full_trace());
1095 PrintStats(helper.get());
1096 KillAssertRunning(&child);
1097
1098 ValidateOnlyPID(helper.get(), pid);
1099
1100 size_t total_alloc = 0;
1101 size_t total_count = 0;
1102 for (const protos::gen::TracePacket& packet : helper->trace()) {
1103 for (const auto& dump : packet.profile_packet().process_dumps()) {
1104 for (const auto& sample : dump.samples()) {
1105 total_alloc += sample.self_max();
1106 total_count += sample.self_max_count();
1107 }
1108 }
1109 }
1110 EXPECT_EQ(total_alloc, 30u);
1111 EXPECT_EQ(total_count, 2u);
1112 }
1113
TEST_P(HeapprofdEndToEnd, CustomLifetime) {
1115 if (allocator_mode() != AllocatorMode::kCustom)
1116 GTEST_SKIP();
1117
1118 int disabled_pipe[2];
1119 PERFETTO_CHECK(pipe(disabled_pipe) == 0); // NOLINT(android-cloexec-pipe)
1120
1121 int disabled_pipe_rd = disabled_pipe[0];
1122 int disabled_pipe_wr = disabled_pipe[1];
1123
1124 base::Subprocess child({"/proc/self/exe"});
1125 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1126 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0=1000000");
1127 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1=" +
1128 std::to_string(disabled_pipe_wr));
1129 child.args.preserve_fds.push_back(disabled_pipe_wr);
1130 StartAndWaitForHandshake(&child);
1131 close(disabled_pipe_wr);
1132
1133 const uint64_t pid = static_cast<uint64_t>(child.pid());
1134
1135 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1136 cfg->set_sampling_interval_bytes(1000000);
1137 cfg->add_pid(pid);
1138 cfg->add_heaps("test");
1139 });
1140
1141 auto helper = Trace(trace_config);
1142 WRITE_TRACE(helper->full_trace());
1143 PrintStats(helper.get());
1144 // Give client some time to notice the disconnect.
1145 sleep(2);
1146 KillAssertRunning(&child);
1147
1148 char x;
1149 EXPECT_EQ(base::Read(disabled_pipe_rd, &x, sizeof(x)), 1);
1150 close(disabled_pipe_rd);
1151 }
1152
TEST_P(HeapprofdEndToEnd, TwoProcesses) {
1154 constexpr size_t kAllocSize = 1024;
1155 constexpr size_t kAllocSize2 = 7;
1156 constexpr size_t kSamplingInterval = 1;
1157
1158 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1159 base::Subprocess child2 = ForkContinuousAlloc(allocator_mode(), kAllocSize2);
1160 const uint64_t pid = static_cast<uint64_t>(child.pid());
1161 const auto pid2 = child2.pid();
1162
1163 TraceConfig trace_config =
1164 MakeTraceConfig([this, pid, pid2](HeapprofdConfig* cfg) {
1165 cfg->set_sampling_interval_bytes(kSamplingInterval);
1166 cfg->add_pid(pid);
1167 cfg->add_pid(static_cast<uint64_t>(pid2));
1168 cfg->add_heaps(allocator_name());
1169 });
1170
1171 auto helper = Trace(trace_config);
1172 WRITE_TRACE(helper->full_trace());
1173 PrintStats(helper.get());
1174
1175 KillAssertRunning(&child);
1176 KillAssertRunning(&child2);
1177
1178 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1179 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1180 ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid2),
1181 allocator_name(), kSamplingInterval);
1182 ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid2), kAllocSize2);
1183 }
1184
TEST_P(HeapprofdEndToEnd, FinalFlush) {
1186 constexpr size_t kAllocSize = 1024;
1187 constexpr size_t kSamplingInterval = 1;
1188
1189 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1190 const uint64_t pid = static_cast<uint64_t>(child.pid());
1191 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1192 cfg->set_sampling_interval_bytes(kSamplingInterval);
1193 cfg->add_pid(pid);
1194 cfg->add_heaps(allocator_name());
1195 });
1196
1197 auto helper = Trace(trace_config);
1198 WRITE_TRACE(helper->full_trace());
1199 PrintStats(helper.get());
1200 KillAssertRunning(&child);
1201
1202 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1203 ValidateOnlyPID(helper.get(), pid);
1204 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1205 }
1206
TEST_P(HeapprofdEndToEnd, NativeStartup) {
1208 if (test_mode() == TestMode::kStatic)
1209 GTEST_SKIP();
1210
1211 auto helper = GetHelper(&task_runner);
1212
1213 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1214 cfg->set_sampling_interval_bytes(1);
1215 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1216 cfg->add_heaps(allocator_name());
1217 });
1218 trace_config.set_duration_ms(5000);
1219
1220 helper->StartTracing(trace_config);
1221
  // Wait to guarantee that the process forked below is hooked by the profiler
  // by virtue of the startup check, and not by virtue of being seen as a
  // running process. This sleep prevents the test from accidentally reaching
  // the fork()+exec() before the heap profiling daemon has received the trace
  // config.
1227 sleep(1);
1228
1229 base::Subprocess child({"/proc/self/exe"});
1230 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1231 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1232 allocator_name());
1233 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1234 std::to_string(kStartupAllocSize));
1235 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1236 std::string("0"));
1237 StartAndWaitForHandshake(&child);
1238
1239 ReadAndWait(helper.get());
1240 WRITE_TRACE(helper->full_trace());
1241
1242 KillAssertRunning(&child);
1243
1244 const auto& packets = helper->trace();
1245 ASSERT_GT(packets.size(), 0u);
1246 size_t profile_packets = 0;
1247 size_t samples = 0;
1248 uint64_t total_allocated = 0;
1249 uint64_t total_freed = 0;
1250 for (const protos::gen::TracePacket& packet : packets) {
1251 if (packet.has_profile_packet() &&
1252 !packet.profile_packet().process_dumps().empty()) {
1253 const auto& dumps = packet.profile_packet().process_dumps();
1254 ASSERT_EQ(dumps.size(), 1u);
1255 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1256 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1257 profile_packets++;
1258 for (const auto& sample : dump.samples()) {
1259 samples++;
1260 total_allocated += sample.self_allocated();
1261 total_freed += sample.self_freed();
1262 }
1263 }
1264 }
1265 EXPECT_EQ(profile_packets, 1u);
1266 EXPECT_GT(samples, 0u);
1267 EXPECT_GT(total_allocated, 0u);
1268 EXPECT_GT(total_freed, 0u);
1269 }
1270
TEST_P(HeapprofdEndToEnd, NativeStartupDenormalizedCmdline) {
1272 if (test_mode() == TestMode::kStatic)
1273 GTEST_SKIP();
1274
1275 auto helper = GetHelper(&task_runner);
1276
1277 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1278 cfg->set_sampling_interval_bytes(1);
1279 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1280 cfg->add_heaps(allocator_name());
1281 });
1282 trace_config.set_duration_ms(5000);
1283
1284 helper->StartTracing(trace_config);
1285
  // Wait to guarantee that the process forked below is hooked by the profiler
  // by virtue of the startup check, and not by virtue of being seen as a
  // running process. This sleep prevents the test from accidentally reaching
  // the fork()+exec() before the heap profiling daemon has received the trace
  // config.
1291 sleep(1);
1292
1293 base::Subprocess child({"/proc/self/exe"});
1294 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1295 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1296 allocator_name());
1297 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1298 std::to_string(kStartupAllocSize));
1299 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1300 std::string("0"));
1301
1302 StartAndWaitForHandshake(&child);
1303
1304 ReadAndWait(helper.get());
1305 WRITE_TRACE(helper->full_trace());
1306
1307 KillAssertRunning(&child);
1308
1309 const auto& packets = helper->trace();
1310 ASSERT_GT(packets.size(), 0u);
1311 size_t profile_packets = 0;
1312 size_t samples = 0;
1313 uint64_t total_allocated = 0;
1314 uint64_t total_freed = 0;
1315 for (const protos::gen::TracePacket& packet : packets) {
1316 if (packet.has_profile_packet() &&
1317 !packet.profile_packet().process_dumps().empty()) {
1318 const auto& dumps = packet.profile_packet().process_dumps();
1319 ASSERT_EQ(dumps.size(), 1u);
1320 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1321 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1322 profile_packets++;
1323 for (const auto& sample : dump.samples()) {
1324 samples++;
1325 total_allocated += sample.self_allocated();
1326 total_freed += sample.self_freed();
1327 }
1328 }
1329 }
1330 EXPECT_EQ(profile_packets, 1u);
1331 EXPECT_GT(samples, 0u);
1332 EXPECT_GT(total_allocated, 0u);
1333 EXPECT_GT(total_freed, 0u);
1334 }
1335
TEST_P(HeapprofdEndToEnd, DiscoverByName) {
1337 auto helper = GetHelper(&task_runner);
1338
1339 base::Subprocess child({"/proc/self/exe"});
1340 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1341 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1342 allocator_name());
1343 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1344 std::to_string(kStartupAllocSize));
1345 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1346 std::string("0"));
1347
1348 StartAndWaitForHandshake(&child);
1349
1350 // Wait to make sure process is fully initialized, so we do not accidentally
1351 // match it by the startup logic.
1352 sleep(1);
1353
1354 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1355 cfg->set_sampling_interval_bytes(1);
1356 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1357 cfg->add_heaps(allocator_name());
1358 });
1359 trace_config.set_duration_ms(5000);
1360
1361 helper->StartTracing(trace_config);
1362 ReadAndWait(helper.get());
1363 WRITE_TRACE(helper->full_trace());
1364
1365 KillAssertRunning(&child);
1366
1367 const auto& packets = helper->trace();
1368 ASSERT_GT(packets.size(), 0u);
1369 size_t profile_packets = 0;
1370 size_t samples = 0;
1371 uint64_t total_allocated = 0;
1372 uint64_t total_freed = 0;
1373 for (const protos::gen::TracePacket& packet : packets) {
1374 if (packet.has_profile_packet() &&
1375 !packet.profile_packet().process_dumps().empty()) {
1376 const auto& dumps = packet.profile_packet().process_dumps();
1377 ASSERT_EQ(dumps.size(), 1u);
1378 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1379 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1380 profile_packets++;
1381 for (const auto& sample : dump.samples()) {
1382 samples++;
1383 total_allocated += sample.self_allocated();
1384 total_freed += sample.self_freed();
1385 }
1386 }
1387 }
1388 EXPECT_EQ(profile_packets, 1u);
1389 EXPECT_GT(samples, 0u);
1390 EXPECT_GT(total_allocated, 0u);
1391 EXPECT_GT(total_freed, 0u);
1392 }
1393
TEST_P(HeapprofdEndToEnd, DiscoverByNameDenormalizedCmdline) {
1395 auto helper = GetHelper(&task_runner);
1396
1397 // Make sure the forked process does not get reparented to init.
1398 base::Subprocess child({"/proc/self/exe"});
1399 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1400 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1401 allocator_name());
1402 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1403 std::to_string(kStartupAllocSize));
1404 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1405 std::string("0"));
1406
1407 StartAndWaitForHandshake(&child);
1408
1409 // Wait to make sure process is fully initialized, so we do not accidentally
1410 // match it by the startup logic.
1411 sleep(1);
1412
1413 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1414 cfg->set_sampling_interval_bytes(1);
1415 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1416 cfg->add_heaps(allocator_name());
1417 });
1418 trace_config.set_duration_ms(5000);
1419
1420 helper->StartTracing(trace_config);
1421 ReadAndWait(helper.get());
1422 WRITE_TRACE(helper->full_trace());
1423
1424 KillAssertRunning(&child);
1425
1426 const auto& packets = helper->trace();
1427 ASSERT_GT(packets.size(), 0u);
1428 size_t profile_packets = 0;
1429 size_t samples = 0;
1430 uint64_t total_allocated = 0;
1431 uint64_t total_freed = 0;
1432 for (const protos::gen::TracePacket& packet : packets) {
1433 if (packet.has_profile_packet() &&
1434 !packet.profile_packet().process_dumps().empty()) {
1435 const auto& dumps = packet.profile_packet().process_dumps();
1436 ASSERT_EQ(dumps.size(), 1u);
1437 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1438 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1439 profile_packets++;
1440 for (const auto& sample : dump.samples()) {
1441 samples++;
1442 total_allocated += sample.self_allocated();
1443 total_freed += sample.self_freed();
1444 }
1445 }
1446 }
1447 EXPECT_EQ(profile_packets, 1u);
1448 EXPECT_GT(samples, 0u);
1449 EXPECT_GT(total_allocated, 0u);
1450 EXPECT_GT(total_freed, 0u);
1451 }
1452
TEST_P(HeapprofdEndToEnd, ReInit) {
1454 constexpr size_t kSamplingInterval = 1;
1455
  // We cannot use base::Pipe because that assumes we want CLOEXEC.
  // We do NOT want CLOEXEC as these pipes get used by RunReInit in the child.
1458 int signal_pipe[2];
1459 int ack_pipe[2];
1460
1461 PERFETTO_CHECK(pipe(signal_pipe) == 0); // NOLINT(android-cloexec-pipe)
1462 PERFETTO_CHECK(pipe(ack_pipe) == 0); // NOLINT(android-cloexec-pipe)
1463
1464 int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
1465 PERFETTO_CHECK(cur_flags >= 0);
1466 PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1467 cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
1468 PERFETTO_CHECK(cur_flags >= 0);
1469 PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1470
1471 int signal_pipe_rd = signal_pipe[0];
1472 int signal_pipe_wr = signal_pipe[1];
1473 int ack_pipe_rd = ack_pipe[0];
1474 int ack_pipe_wr = ack_pipe[1];
1475
1476 base::Subprocess child({"/proc/self/exe"});
1477 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1478 child.args.preserve_fds.push_back(signal_pipe_rd);
1479 child.args.preserve_fds.push_back(ack_pipe_wr);
1480 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
1481 allocator_name());
1482 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
1483 std::to_string(signal_pipe_rd));
1484 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
1485 std::to_string(ack_pipe_wr));
1486 StartAndWaitForHandshake(&child);
1487
1488 const uint64_t pid = static_cast<uint64_t>(child.pid());
1489
1490 close(signal_pipe_rd);
1491 close(ack_pipe_wr);
1492
1493 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1494 cfg->set_sampling_interval_bytes(kSamplingInterval);
1495 cfg->add_pid(pid);
1496 cfg->add_heaps(allocator_name());
1497 });
1498
1499 auto helper = Trace(trace_config);
1500 WRITE_TRACE(helper->full_trace());
1501
1502 PrintStats(helper.get());
1503 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1504 ValidateOnlyPID(helper.get(), pid);
1505 ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);
1506
1507 PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
1508 close(signal_pipe_wr);
1509 char buf[1];
1510 ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
1511 close(ack_pipe_rd);
1512
1513 // A brief sleep to allow the client to notice that the profiling session is
1514 // to be torn down (as it rejects concurrent sessions).
1515 usleep(500 * kMsToUs);
1516
1517 PERFETTO_LOG("HeapprofdEndToEnd::Reinit: Starting second");
1518
1519 // We must keep alive the original helper because it owns the service thread.
1520 std::unique_ptr<TraceProcessorTestHelper> helper2 =
1521 std::unique_ptr<TraceProcessorTestHelper>(
1522 new TraceProcessorTestHelper(&task_runner));
1523
1524 helper2->ConnectConsumer();
1525 helper2->WaitForConsumerConnect();
1526 helper2->StartTracing(trace_config);
1527 ReadAndWait(helper2.get());
1528 WRITE_TRACE(helper2->trace());
1529
1530 PrintStats(helper2.get());
1531 KillAssertRunning(&child);
1532
1533 ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
1534 ValidateOnlyPID(helper2.get(), pid);
1535 ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
1536 }
1537
TEST_P(HeapprofdEndToEnd, ReInitAfterInvalid) {
1539 constexpr size_t kSamplingInterval = 1;
1540
1541 // We cannot use base::Pipe because that assumes we want CLOEXEC.
1542 // We do NOT want CLOEXEC as this gets used by the RunReInit in the child.
1543 int signal_pipe[2];
1544 int ack_pipe[2];
1545
1546 PERFETTO_CHECK(pipe(signal_pipe) == 0); // NOLINT(android-cloexec-pipe)
1547 PERFETTO_CHECK(pipe(ack_pipe) == 0); // NOLINT(android-cloexec-pipe)
1548
1549 int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
1550 PERFETTO_CHECK(cur_flags >= 0);
1551 PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1552 cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
1553 PERFETTO_CHECK(cur_flags >= 0);
1554 PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1555
1556 int signal_pipe_rd = signal_pipe[0];
1557 int signal_pipe_wr = signal_pipe[1];
1558 int ack_pipe_rd = ack_pipe[0];
1559 int ack_pipe_wr = ack_pipe[1];
1560
1561 base::Subprocess child({"/proc/self/exe"});
1562 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1563 child.args.preserve_fds.push_back(signal_pipe_rd);
1564 child.args.preserve_fds.push_back(ack_pipe_wr);
1565 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
1566 allocator_name());
1567 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
1568 std::to_string(signal_pipe_rd));
1569 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
1570 std::to_string(ack_pipe_wr));
1571 StartAndWaitForHandshake(&child);
1572
1573 const uint64_t pid = static_cast<uint64_t>(child.pid());
1574
1575 close(signal_pipe_rd);
1576 close(ack_pipe_wr);
1577
1578 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1579 cfg->set_sampling_interval_bytes(kSamplingInterval);
1580 cfg->add_pid(pid);
1581 cfg->add_heaps(allocator_name());
1582 });
1583
1584 auto helper = Trace(trace_config);
1585 WRITE_TRACE(helper->full_trace());
1586
1587 PrintStats(helper.get());
1588 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1589 ValidateOnlyPID(helper.get(), pid);
1590 ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);
1591
1592 PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
1593 close(signal_pipe_wr);
1594 char buf[1];
1595 ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
1596 close(ack_pipe_rd);
1597
1598 // A brief sleep to allow the client to notice that the profiling session is
1599 // to be torn down (as it rejects concurrent sessions).
1600 usleep(500 * kMsToUs);
1601
  PERFETTO_LOG("HeapprofdEndToEnd::ReInitAfterInvalid: Starting second");

  // We must keep alive the original helper because it owns the service thread.
  std::unique_ptr<TraceProcessorTestHelper> helper2 =
      std::unique_ptr<TraceProcessorTestHelper>(
          new TraceProcessorTestHelper(&task_runner));

  helper2->ConnectConsumer();
  helper2->WaitForConsumerConnect();
  helper2->StartTracing(trace_config);
  ReadAndWait(helper2.get());

  WRITE_TRACE(helper2->trace());

  PrintStats(helper2.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper2.get(), pid);
  ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
}

TEST_P(HeapprofdEndToEnd, ConcurrentSession) {
  constexpr size_t kAllocSize = 1024;
  constexpr size_t kSamplingInterval = 1;

  base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
  const uint64_t pid = static_cast<uint64_t>(child.pid());

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
    ContinuousDump(cfg);
  });
  trace_config.set_duration_ms(5000);

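  // The first session should profile the child normally; the concurrent
  // session started below is expected to be reported as rejected.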
  auto helper = GetHelper(&task_runner);
  helper->StartTracing(trace_config);
  sleep(1);

  PERFETTO_LOG("Starting concurrent.");
  std::unique_ptr<TraceProcessorTestHelper> helper_concurrent(
      new TraceProcessorTestHelper(&task_runner));
  helper_concurrent->ConnectConsumer();
  helper_concurrent->WaitForConsumerConnect();
  helper_concurrent->StartTracing(trace_config);

  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());
  PrintStats(helper.get());

  ReadAndWait(helper_concurrent.get());
  WRITE_TRACE(helper_concurrent->trace());
  PrintStats(helper_concurrent.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kAllocSize);
  ValidateRejectedConcurrent(helper.get(), pid, false);

  ValidateOnlyPID(helper_concurrent.get(), pid);
  ValidateRejectedConcurrent(helper_concurrent.get(), pid, true);
}

TEST_P(HeapprofdEndToEnd, NativeProfilingActiveAtProcessExit) {
  constexpr uint64_t kTestAllocSize = 128;
  base::Pipe start_pipe = base::Pipe::Create(base::Pipe::kBothBlock);
  int start_pipe_wr = *start_pipe.wr;

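  // Re-exec this test binary as a child that performs the allocations
  // configured through HEAPPROFD_TESTING_RUN_MALLOC_* and then exits on its
  // own while profiling is still active.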
  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
                           std::to_string(kTestAllocSize));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
                           std::to_string(0));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
                           std::to_string(200));
  child.args.preserve_fds.push_back(start_pipe_wr);
  child.args.posix_entrypoint_for_testing = [start_pipe_wr] {
    PERFETTO_CHECK(PERFETTO_EINTR(write(start_pipe_wr, "1", 1)) == 1);
    PERFETTO_CHECK(close(start_pipe_wr) == 0 || errno == EINTR);
  };

  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());
  start_pipe.wr.reset();

  // Construct tracing config (without starting profiling).
  auto helper = GetHelper(&task_runner);

  // Wait for child to have been scheduled at least once.
  char buf[1] = {};
  ASSERT_EQ(PERFETTO_EINTR(read(*start_pipe.rd, buf, sizeof(buf))), 1);
  start_pipe.rd.reset();

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(1);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });
  trace_config.set_duration_ms(5000);

  // Trace until child exits.
  helper->StartTracing(trace_config);

  // Wait for the child and assert that it exited successfully.
  EXPECT_TRUE(child.Wait(30000));
  EXPECT_EQ(child.status(), base::Subprocess::kTerminated);
  EXPECT_EQ(child.returncode(), 0);

  // Assert that we did profile the process.
  helper->FlushAndWait(2000);
  helper->DisableTracing();
  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());

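  // Expect exactly one dump for the child, containing at least one sample with
  // a non-zero allocation total.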
  const auto& packets = helper->trace();
  ASSERT_GT(packets.size(), 0u);
  size_t profile_packets = 0;
  size_t samples = 0;
  uint64_t total_allocated = 0;
  for (const protos::gen::TracePacket& packet : packets) {
    if (packet.has_profile_packet() &&
        !packet.profile_packet().process_dumps().empty()) {
      const auto& dumps = packet.profile_packet().process_dumps();
      ASSERT_EQ(dumps.size(), 1u);
      const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
      EXPECT_EQ(dump.pid(), pid);
      profile_packets++;
      for (const auto& sample : dump.samples()) {
        samples++;
        total_allocated += sample.self_allocated();
      }
    }
  }
  EXPECT_EQ(profile_packets, 1u);
  EXPECT_GT(samples, 0u);
  EXPECT_GT(total_allocated, 0u);
}

// On in-tree Android, we use the system heapprofd in fork or central mode.
// For Linux and out-of-tree Android, we statically include a copy of
// heapprofd and use that. The statically included copy does not support
// intercepting malloc.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#if !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
#error "Need to start daemons for Linux test."
#endif

INSTANTIATE_TEST_CASE_P(DISABLED_Run,
                        HeapprofdEndToEnd,
                        Values(std::make_tuple(TestMode::kStatic,
                                               AllocatorMode::kCustom)),
                        TestSuffix);
#elif !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
INSTANTIATE_TEST_CASE_P(
    Run,
    HeapprofdEndToEnd,
    Values(std::make_tuple(TestMode::kCentral, AllocatorMode::kMalloc),
           std::make_tuple(TestMode::kCentral, AllocatorMode::kCustom)),
    TestSuffix);
#endif

}  // namespace
}  // namespace profiling
}  // namespace perfetto