/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "perfetto/base/build_config.h"
#include "perfetto/base/pipe.h"
#include "src/base/test/test_task_runner.h"
#include "test/test_helper.h"

#include "src/profiling/memory/heapprofd_producer.h"
#include "src/tracing/ipc/default_socket.h"

#include <sys/system_properties.h>

#include <fcntl.h>
#include <signal.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

// This test only works when run on Android using an Android Q version of
// Bionic.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#error "This test can only be used on Android."
#endif

namespace perfetto {
namespace profiling {
namespace {

// If we're building on Android and starting the daemons ourselves,
// create the sockets in a world-writable location.
#if PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
constexpr const char* kTestProducerSockName = "/data/local/tmp/traced_producer";
#endif

constexpr useconds_t kMsToUs = 1000;

using ::testing::AnyOf;
using ::testing::Eq;

class HeapprofdDelegate : public ThreadDelegate {
 public:
  HeapprofdDelegate(const std::string& producer_socket)
      : producer_socket_(producer_socket) {}
  ~HeapprofdDelegate() override = default;

  void Initialize(base::TaskRunner* task_runner) override {
    producer_.reset(
        new HeapprofdProducer(HeapprofdMode::kCentral, task_runner));
    producer_->ConnectWithRetries(producer_socket_.c_str());
  }

 private:
  std::string producer_socket_;
  std::unique_ptr<HeapprofdProducer> producer_;
};

constexpr const char* kEnableHeapprofdProperty = "persist.heapprofd.enable";
constexpr const char* kHeapprofdModeProperty = "heapprofd.userdebug.mode";

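// Reads the Android system property |name|, returning |def| if it is not set.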
std::string ReadProperty(const std::string& name, std::string def) {
  const prop_info* pi = __system_property_find(name.c_str());
  if (pi) {
    __system_property_read_callback(
        pi,
        [](void* cookie, const char*, const char* value, uint32_t) {
          *reinterpret_cast<std::string*>(cookie) = value;
        },
        &def);
  }
  return def;
}

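// base::ScopedResource closer: writes |value| back into the mode property and
// frees the heap-allocated string.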
int __attribute__((unused)) SetModeProperty(std::string* value) {
  if (value) {
    __system_property_set(kHeapprofdModeProperty, value->c_str());
    delete value;
  }
  return 0;
}

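// Flips heapprofd.userdebug.mode and returns an RAII handle that restores the
// previous value of the property when it goes out of scope.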
base::ScopedResource<std::string*, SetModeProperty, nullptr> EnableFork() {
  std::string prev_property_value = ReadProperty(kHeapprofdModeProperty, "");
  __system_property_set(kHeapprofdModeProperty, "fork");
  return base::ScopedResource<std::string*, SetModeProperty, nullptr>(
      new std::string(prev_property_value));
}

base::ScopedResource<std::string*, SetModeProperty, nullptr> DisableFork() {
  std::string prev_property_value = ReadProperty(kHeapprofdModeProperty, "");
  __system_property_set(kHeapprofdModeProperty, "");
  return base::ScopedResource<std::string*, SetModeProperty, nullptr>(
      new std::string(prev_property_value));
}

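// Same closer as SetModeProperty, but for persist.heapprofd.enable.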
int __attribute__((unused)) SetEnableProperty(std::string* value) {
  if (value) {
    __system_property_set(kEnableHeapprofdProperty, value->c_str());
    delete value;
  }
  return 0;
}

constexpr size_t kStartupAllocSize = 10;

void AllocateAndFree(size_t bytes) {
  // This volatile is needed to prevent the compiler from trying to be
  // helpful and compiling a "useless" malloc + free into a noop.
  volatile char* x = static_cast<char*>(malloc(bytes));
  if (x) {
    x[1] = 'x';
    free(const_cast<char*>(x));
  }
}

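// Allocation loop run by the forked test children; never returns.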
void __attribute__((noreturn)) ContinuousMalloc(size_t bytes) {
  for (;;) {
    AllocateAndFree(bytes);
    usleep(10 * kMsToUs);
  }
}

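// Forks a child that allocates and frees |bytes| in a loop. Returns the
// child's pid; the caller is responsible for killing and reaping it.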
pid_t ForkContinuousMalloc(size_t bytes) {
  // Make sure the forked process does not get reparented to init.
  setsid();
  pid_t pid = fork();
  switch (pid) {
    case -1:
      PERFETTO_FATAL("Failed to fork.");
    case 0:
      ContinuousMalloc(bytes);
    default:
      break;
  }
  return pid;
}

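// When the test binary is re-executed with HEAPPROFD_TESTING_RUN_MALLOC set
// (see the fork()+exec() in the startup tests below), this constructor
// function turns the process into one that does nothing but allocate in a
// loop.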
void __attribute__((constructor)) RunContinuousMalloc() {
  if (getenv("HEAPPROFD_TESTING_RUN_MALLOC") != nullptr)
    ContinuousMalloc(kStartupAllocSize);
}

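// Connects a consumer to the tracing service and waits for the connection.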
std::unique_ptr<TestHelper> GetHelper(base::TestTaskRunner* task_runner) {
  std::unique_ptr<TestHelper> helper(new TestHelper(task_runner));
  helper->ConnectConsumer();
  helper->WaitForConsumerConnect();
  return helper;
}

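// Pretty-prints the unwinding time histogram for logging.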
std::string FormatHistogram(const protos::ProfilePacket_Histogram& hist) {
  std::string out;
  std::string prev_upper_limit = "-inf";
  for (const auto& bucket : hist.buckets()) {
    std::string upper_limit;
    if (bucket.max_bucket())
      upper_limit = "inf";
    else
      upper_limit = std::to_string(bucket.upper_limit());

    out += "[" + prev_upper_limit + ", " + upper_limit +
           "]: " + std::to_string(bucket.count()) + "; ";
    prev_upper_limit = std::move(upper_limit);
  }
  return out + "\n";
}

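// Pretty-prints the per-process profiling stats for logging.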
std::string FormatStats(const protos::ProfilePacket_ProcessStats& stats) {
  return std::string("unwinding_errors: ") +
         std::to_string(stats.unwinding_errors()) + "\n" +
         "heap_samples: " + std::to_string(stats.heap_samples()) + "\n" +
         "map_reparses: " + std::to_string(stats.map_reparses()) + "\n" +
         "unwinding_time_us: " + FormatHistogram(stats.unwinding_time_us());
}

class HeapprofdEndToEnd : public ::testing::Test {
 public:
  HeapprofdEndToEnd() {
    // This is not needed for correctness, but works around an init behavior
    // that makes this test take much longer. If persist.heapprofd.enable is
    // set to 0 and then set to 1 again too quickly, init decides that the
    // service is "restarting" and waits before restarting it.
    usleep(50000);
#if PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
    // When starting the daemons ourselves, the heapprofd producer (see
    // producer_thread below) has to be spun up explicitly.
    producer_thread.Start(std::unique_ptr<HeapprofdDelegate>(
        new HeapprofdDelegate(kTestProducerSockName)));
#endif
  }

 protected:
  base::TestTaskRunner task_runner;

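  // Runs a tracing session with |trace_config| to completion and returns the
  // helper holding the resulting trace.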
  std::unique_ptr<TestHelper> Trace(const TraceConfig& trace_config) {
    auto helper = GetHelper(&task_runner);

    helper->StartTracing(trace_config);
    helper->WaitForTracingDisabled(20000);

    helper->ReadData();
    helper->WaitForReadData();
    return helper;
  }

  void PrintStats(TestHelper* helper) {
    const auto& packets = helper->trace();
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        // protobuf uint64 does not like the PRIu64 formatter.
        PERFETTO_LOG("Stats for %s: %s", std::to_string(dump.pid()).c_str(),
                     FormatStats(dump.stats()).c_str());
      }
    }
  }

  void ValidateSampleSizes(TestHelper* helper,
                           uint64_t pid,
                           uint64_t alloc_size) {
    const auto& packets = helper->trace();
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        if (dump.pid() != pid)
          continue;
        for (const auto& sample : dump.samples()) {
          EXPECT_EQ(sample.self_allocated() % alloc_size, 0);
          EXPECT_EQ(sample.self_freed() % alloc_size, 0);
          EXPECT_THAT(sample.self_allocated() - sample.self_freed(),
                      AnyOf(Eq(0), Eq(alloc_size)));
        }
      }
    }
  }

  void ValidateFromStartup(TestHelper* helper,
                           uint64_t pid,
                           bool from_startup) {
    const auto& packets = helper->trace();
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        if (dump.pid() != pid)
          continue;
        EXPECT_EQ(dump.from_startup(), from_startup);
      }
    }
  }

  void ValidateRejectedConcurrent(TestHelper* helper,
                                  uint64_t pid,
                                  bool rejected_concurrent) {
    const auto& packets = helper->trace();
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        if (dump.pid() != pid)
          continue;
        EXPECT_EQ(dump.rejected_concurrent(), rejected_concurrent);
      }
    }
  }

  void ValidateHasSamples(TestHelper* helper, uint64_t pid) {
    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t last_allocated = 0;
    uint64_t last_freed = 0;
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        if (dump.pid() != pid)
          continue;
        for (const auto& sample : dump.samples()) {
          last_allocated = sample.self_allocated();
          last_freed = sample.self_freed();
          samples++;
        }
        profile_packets++;
      }
    }
    EXPECT_GT(profile_packets, 0);
    EXPECT_GT(samples, 0);
    EXPECT_GT(last_allocated, 0);
    EXPECT_GT(last_freed, 0);
  }

  void ValidateOnlyPID(TestHelper* helper, uint64_t pid) {
    size_t dumps = 0;
    const auto& packets = helper->trace();
    for (const protos::TracePacket& packet : packets) {
      for (const auto& dump : packet.profile_packet().process_dumps()) {
        EXPECT_EQ(dump.pid(), pid);
        dumps++;
      }
    }
    EXPECT_GT(dumps, 0);
  }

#if PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
  // Thread hosting the heapprofd producer when this test starts the daemons
  // itself; started in the constructor above.
  TaskRunnerThread producer_thread{"perfetto.prd"};
#endif

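  // The test bodies below are shared by the central-mode and fork-mode test
  // cases registered at the bottom of this file.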
  void Smoke() {
    constexpr size_t kAllocSize = 1024;

    pid_t pid = ForkContinuousMalloc(kAllocSize);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(2000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");
    ds_config->set_target_buffer(0);

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);
    heapprofd_config->set_all(false);
    heapprofd_config->mutable_continuous_dump_config()->set_dump_phase_ms(0);
    heapprofd_config->mutable_continuous_dump_config()->set_dump_interval_ms(
        100);

    auto helper = Trace(trace_config);
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateOnlyPID(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid), kAllocSize);

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);
  }

  void TwoProcesses() {
    constexpr size_t kAllocSize = 1024;
    constexpr size_t kAllocSize2 = 7;

    pid_t pid = ForkContinuousMalloc(kAllocSize);
    pid_t pid2 = ForkContinuousMalloc(kAllocSize2);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(2000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");
    ds_config->set_target_buffer(0);

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid2);
    heapprofd_config->set_all(false);

    auto helper = Trace(trace_config);
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid), kAllocSize);
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid2));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid2), kAllocSize2);

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);
    PERFETTO_CHECK(kill(pid2, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid2, nullptr, 0)) == pid2);
  }

  void FinalFlush() {
    constexpr size_t kAllocSize = 1024;

    pid_t pid = ForkContinuousMalloc(kAllocSize);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(2000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");
    ds_config->set_target_buffer(0);

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);
    heapprofd_config->set_all(false);

    auto helper = Trace(trace_config);
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateOnlyPID(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid), kAllocSize);

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);
  }

  void NativeStartup() {
    auto helper = GetHelper(&task_runner);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_process_cmdline() = "heapprofd_continuous_malloc";
    heapprofd_config->set_all(false);

    helper->StartTracing(trace_config);

    // Wait to guarantee that the process forked below is profiled because of
    // the startup check, and not because it was seen as an already-running
    // process. Without this sleep the test could reach the fork()+exec()
    // before the heap profiling daemon has received the trace config.
    sleep(1);

    // Make sure the forked process does not get reparented to init.
    setsid();
    pid_t pid = fork();
    switch (pid) {
      case -1:
        PERFETTO_FATAL("Failed to fork.");
      case 0: {
        const char* envp[] = {"HEAPPROFD_TESTING_RUN_MALLOC=1", nullptr};
        int null = open("/dev/null", O_RDWR);
        dup2(null, STDIN_FILENO);
        dup2(null, STDOUT_FILENO);
        dup2(null, STDERR_FILENO);
        PERFETTO_CHECK(execle("/proc/self/exe", "heapprofd_continuous_malloc",
                              nullptr, envp) == 0);
        break;
      }
      default:
        break;
    }

    helper->WaitForTracingDisabled(20000);

    helper->ReadData();
    helper->WaitForReadData();

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);

    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t total_allocated = 0;
    uint64_t total_freed = 0;
    for (const protos::TracePacket& packet : packets) {
      if (packet.has_profile_packet() &&
          packet.profile_packet().process_dumps().size() > 0) {
        const auto& dumps = packet.profile_packet().process_dumps();
        ASSERT_EQ(dumps.size(), 1);
        const protos::ProfilePacket_ProcessHeapSamples& dump = dumps.Get(0);
        EXPECT_EQ(dump.pid(), pid);
        profile_packets++;
        for (const auto& sample : dump.samples()) {
          samples++;
          total_allocated += sample.self_allocated();
          total_freed += sample.self_freed();
        }
      }
    }
    EXPECT_EQ(profile_packets, 1);
    EXPECT_GT(samples, 0);
    EXPECT_GT(total_allocated, 0);
    EXPECT_GT(total_freed, 0);
  }

  void NativeStartupDenormalizedCmdline() {
    auto helper = GetHelper(&task_runner);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_process_cmdline() =
        "heapprofd_continuous_malloc@something";
    heapprofd_config->set_all(false);

    helper->StartTracing(trace_config);

    // Wait to guarantee that the process forked below is profiled because of
    // the startup check, and not because it was seen as an already-running
    // process. Without this sleep the test could reach the fork()+exec()
    // before the heap profiling daemon has received the trace config.
    sleep(1);

    // Make sure the forked process does not get reparented to init.
    setsid();
    pid_t pid = fork();
    switch (pid) {
      case -1:
        PERFETTO_FATAL("Failed to fork.");
      case 0: {
        const char* envp[] = {"HEAPPROFD_TESTING_RUN_MALLOC=1", nullptr};
        int null = open("/dev/null", O_RDWR);
        dup2(null, STDIN_FILENO);
        dup2(null, STDOUT_FILENO);
        dup2(null, STDERR_FILENO);
        PERFETTO_CHECK(execle("/proc/self/exe", "heapprofd_continuous_malloc",
                              nullptr, envp) == 0);
        break;
      }
      default:
        break;
    }

    helper->WaitForTracingDisabled(20000);

    helper->ReadData();
    helper->WaitForReadData();

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);

    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t total_allocated = 0;
    uint64_t total_freed = 0;
    for (const protos::TracePacket& packet : packets) {
      if (packet.has_profile_packet() &&
          packet.profile_packet().process_dumps().size() > 0) {
        const auto& dumps = packet.profile_packet().process_dumps();
        ASSERT_EQ(dumps.size(), 1);
        const protos::ProfilePacket_ProcessHeapSamples& dump = dumps.Get(0);
        EXPECT_EQ(dump.pid(), pid);
        profile_packets++;
        for (const auto& sample : dump.samples()) {
          samples++;
          total_allocated += sample.self_allocated();
          total_freed += sample.self_freed();
        }
      }
    }
    EXPECT_EQ(profile_packets, 1);
    EXPECT_GT(samples, 0);
    EXPECT_GT(total_allocated, 0);
    EXPECT_GT(total_freed, 0);
  }

  void DiscoverByName() {
    auto helper = GetHelper(&task_runner);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_process_cmdline() = "heapprofd_continuous_malloc";
    heapprofd_config->set_all(false);

    // Make sure the forked process does not get reparented to init.
    setsid();
    pid_t pid = fork();
    switch (pid) {
      case -1:
        PERFETTO_FATAL("Failed to fork.");
      case 0: {
        const char* envp[] = {"HEAPPROFD_TESTING_RUN_MALLOC=1", nullptr};
        int null = open("/dev/null", O_RDWR);
        dup2(null, STDIN_FILENO);
        dup2(null, STDOUT_FILENO);
        dup2(null, STDERR_FILENO);
        PERFETTO_CHECK(execle("/proc/self/exe", "heapprofd_continuous_malloc",
                              nullptr, envp) == 0);
        break;
      }
      default:
        break;
    }

    // Wait to make sure the process is fully initialized, so that it is not
    // accidentally matched by the startup logic.
    sleep(1);

    helper->StartTracing(trace_config);
    helper->WaitForTracingDisabled(20000);

    helper->ReadData();
    helper->WaitForReadData();

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);

    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t total_allocated = 0;
    uint64_t total_freed = 0;
    for (const protos::TracePacket& packet : packets) {
      if (packet.has_profile_packet() &&
          packet.profile_packet().process_dumps().size() > 0) {
        const auto& dumps = packet.profile_packet().process_dumps();
        ASSERT_EQ(dumps.size(), 1);
        const protos::ProfilePacket_ProcessHeapSamples& dump = dumps.Get(0);
        EXPECT_EQ(dump.pid(), pid);
        profile_packets++;
        for (const auto& sample : dump.samples()) {
          samples++;
          total_allocated += sample.self_allocated();
          total_freed += sample.self_freed();
        }
      }
    }
    EXPECT_EQ(profile_packets, 1);
    EXPECT_GT(samples, 0);
    EXPECT_GT(total_allocated, 0);
    EXPECT_GT(total_freed, 0);
  }

  void DiscoverByNameDenormalizedCmdline() {
    auto helper = GetHelper(&task_runner);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_process_cmdline() =
        "heapprofd_continuous_malloc@something";
    heapprofd_config->set_all(false);

    // Make sure the forked process does not get reparented to init.
    setsid();
    pid_t pid = fork();
    switch (pid) {
      case -1:
        PERFETTO_FATAL("Failed to fork.");
      case 0: {
        const char* envp[] = {"HEAPPROFD_TESTING_RUN_MALLOC=1", nullptr};
        int null = open("/dev/null", O_RDWR);
        dup2(null, STDIN_FILENO);
        dup2(null, STDOUT_FILENO);
        dup2(null, STDERR_FILENO);
        PERFETTO_CHECK(execle("/proc/self/exe", "heapprofd_continuous_malloc",
                              nullptr, envp) == 0);
        break;
      }
      default:
        break;
    }

    // Wait to make sure the process is fully initialized, so that it is not
    // accidentally matched by the startup logic.
    sleep(1);

    helper->StartTracing(trace_config);
    helper->WaitForTracingDisabled(20000);

    helper->ReadData();
    helper->WaitForReadData();

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);

    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t total_allocated = 0;
    uint64_t total_freed = 0;
    for (const protos::TracePacket& packet : packets) {
      if (packet.has_profile_packet() &&
          packet.profile_packet().process_dumps().size() > 0) {
        const auto& dumps = packet.profile_packet().process_dumps();
        ASSERT_EQ(dumps.size(), 1);
        const protos::ProfilePacket_ProcessHeapSamples& dump = dumps.Get(0);
        EXPECT_EQ(dump.pid(), pid);
        profile_packets++;
        for (const auto& sample : dump.samples()) {
          samples++;
          total_allocated += sample.self_allocated();
          total_freed += sample.self_freed();
        }
      }
    }
    EXPECT_EQ(profile_packets, 1);
    EXPECT_GT(samples, 0);
    EXPECT_GT(total_allocated, 0);
    EXPECT_GT(total_freed, 0);
  }

  void ReInit() {
    constexpr uint64_t kFirstIterationBytes = 5;
    constexpr uint64_t kSecondIterationBytes = 7;

    base::Pipe signal_pipe = base::Pipe::Create(base::Pipe::kBothNonBlock);
    base::Pipe ack_pipe = base::Pipe::Create(base::Pipe::kBothBlock);

    pid_t pid = fork();
    switch (pid) {
      case -1:
        PERFETTO_FATAL("Failed to fork.");
      case 0: {
        uint64_t bytes = kFirstIterationBytes;
        signal_pipe.wr.reset();
        ack_pipe.rd.reset();
        for (;;) {
          AllocateAndFree(bytes);
          char buf[1];
          if (bool(signal_pipe.rd) &&
              read(*signal_pipe.rd, buf, sizeof(buf)) == 0) {
            // Make sure the client has noticed that the session has stopped.
            AllocateAndFree(bytes);

            bytes = kSecondIterationBytes;
            signal_pipe.rd.reset();
            ack_pipe.wr.reset();
          }
          usleep(10 * kMsToUs);
        }
        PERFETTO_FATAL("Should be unreachable");
      }
      default:
        break;
    }

    signal_pipe.rd.reset();
    ack_pipe.wr.reset();

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(2000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");
    ds_config->set_target_buffer(0);

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);
    heapprofd_config->set_all(false);

    auto helper = Trace(trace_config);
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateOnlyPID(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid),
                        kFirstIterationBytes);

    signal_pipe.wr.reset();
    char buf[1];
    ASSERT_EQ(read(*ack_pipe.rd, buf, sizeof(buf)), 0);
    ack_pipe.rd.reset();

    // A brief sleep to allow the client to notice that the profiling session
    // is to be torn down (as it rejects concurrent sessions).
    usleep(100 * kMsToUs);

    PERFETTO_LOG("HeapprofdEndToEnd::Reinit: Starting second");
    helper = Trace(trace_config);
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateOnlyPID(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid),
                        kSecondIterationBytes);

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);
  }

  void ConcurrentSession() {
    constexpr size_t kAllocSize = 1024;

    pid_t pid = ForkContinuousMalloc(kAllocSize);

    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");
    ds_config->set_target_buffer(0);

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);
    heapprofd_config->set_all(false);
    heapprofd_config->mutable_continuous_dump_config()->set_dump_phase_ms(0);
    heapprofd_config->mutable_continuous_dump_config()->set_dump_interval_ms(
        100);

    auto helper = GetHelper(&task_runner);
    helper->StartTracing(trace_config);
    sleep(1);
    auto helper_concurrent = GetHelper(&task_runner);
    helper_concurrent->StartTracing(trace_config);

    helper->WaitForTracingDisabled(20000);
    helper->ReadData();
    helper->WaitForReadData();
    PrintStats(helper.get());
    ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid));
    ValidateOnlyPID(helper.get(), static_cast<uint64_t>(pid));
    ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid), kAllocSize);
    ValidateRejectedConcurrent(helper_concurrent.get(),
                               static_cast<uint64_t>(pid), false);

    helper_concurrent->WaitForTracingDisabled(20000);
    helper_concurrent->ReadData();
    helper_concurrent->WaitForReadData();
    PrintStats(helper.get());
    ValidateOnlyPID(helper_concurrent.get(), static_cast<uint64_t>(pid));
    ValidateRejectedConcurrent(helper_concurrent.get(),
                               static_cast<uint64_t>(pid), true);

    PERFETTO_CHECK(kill(pid, SIGKILL) == 0);
    PERFETTO_CHECK(PERFETTO_EINTR(waitpid(pid, nullptr, 0)) == pid);
  }

  // TODO(rsavitski): fold exit status assertions into existing tests where
  // possible.
  void NativeProfilingActiveAtProcessExit() {
    constexpr uint64_t kTestAllocSize = 128;
    base::Pipe start_pipe = base::Pipe::Create(base::Pipe::kBothBlock);

    pid_t pid = fork();
    if (pid == 0) {  // child
      start_pipe.rd.reset();
      start_pipe.wr.reset();
      for (int i = 0; i < 200; i++) {
        // malloc and leak, otherwise the free batching will cause us to filter
        // out the allocations (as we don't see the interleaved frees).
        volatile char* x = static_cast<char*>(malloc(kTestAllocSize));
        if (x) {
          x[0] = 'x';
        }
        usleep(10 * kMsToUs);
      }
      exit(0);
    }

    ASSERT_NE(pid, -1) << "Failed to fork.";
    start_pipe.wr.reset();

    // Construct tracing config (without starting profiling).
    auto helper = GetHelper(&task_runner);
    TraceConfig trace_config;
    trace_config.add_buffers()->set_size_kb(10 * 1024);
    trace_config.set_duration_ms(5000);
    trace_config.set_flush_timeout_ms(10000);

    auto* ds_config = trace_config.add_data_sources()->mutable_config();
    ds_config->set_name("android.heapprofd");

    auto* heapprofd_config = ds_config->mutable_heapprofd_config();
    heapprofd_config->set_sampling_interval_bytes(1);
    *heapprofd_config->add_pid() = static_cast<uint64_t>(pid);

    // Wait for child to have been scheduled at least once.
    char buf[1] = {};
    ASSERT_EQ(PERFETTO_EINTR(read(*start_pipe.rd, buf, sizeof(buf))), 0);
    start_pipe.rd.reset();

    // Trace until child exits.
    helper->StartTracing(trace_config);

    siginfo_t siginfo = {};
    int wait_ret = PERFETTO_EINTR(
        waitid(P_PID, static_cast<id_t>(pid), &siginfo, WEXITED));
    ASSERT_FALSE(wait_ret) << "Failed to waitid.";

    // Assert that the child exited successfully.
    EXPECT_EQ(siginfo.si_code, CLD_EXITED) << "Child did not exit by itself.";
    EXPECT_EQ(siginfo.si_status, 0) << "Child's exit status not successful.";

    // Assert that we did profile the process.
    helper->FlushAndWait(2000);
    helper->DisableTracing();
    helper->WaitForTracingDisabled(10000);
    helper->ReadData();
    helper->WaitForReadData();

    const auto& packets = helper->trace();
    ASSERT_GT(packets.size(), 0u);
    size_t profile_packets = 0;
    size_t samples = 0;
    uint64_t total_allocated = 0;
    for (const protos::TracePacket& packet : packets) {
      if (packet.has_profile_packet() &&
          packet.profile_packet().process_dumps().size() > 0) {
        const auto& dumps = packet.profile_packet().process_dumps();
        ASSERT_EQ(dumps.size(), 1);
        const protos::ProfilePacket_ProcessHeapSamples& dump = dumps.Get(0);
        EXPECT_EQ(dump.pid(), pid);
        profile_packets++;
        for (const auto& sample : dump.samples()) {
          samples++;
          total_allocated += sample.self_allocated();
        }
      }
    }
    EXPECT_EQ(profile_packets, 1);
    EXPECT_GT(samples, 0);
    EXPECT_GT(total_allocated, 0);
  }
};

// TODO(b/118428762): look into unwinding issues on x86.
#if defined(__i386__) || defined(__x86_64__)
#define MAYBE_SKIP(x) DISABLED_##x
#else
#define MAYBE_SKIP(x) x
#endif

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(Smoke_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  Smoke();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(TwoProcesses_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  TwoProcesses();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(TwoProcesses_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  TwoProcesses();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(Smoke_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  Smoke();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(FinalFlush_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  FinalFlush();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(FinalFlush_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  FinalFlush();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(NativeStartup_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  NativeStartup();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(NativeStartup_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  NativeStartup();
}

TEST_F(HeapprofdEndToEnd,
       MAYBE_SKIP(NativeStartupDenormalizedCmdline_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  NativeStartupDenormalizedCmdline();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(NativeStartupDenormalizedCmdline_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  NativeStartupDenormalizedCmdline();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(DiscoverByName_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  DiscoverByName();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(DiscoverByName_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  DiscoverByName();
}

TEST_F(HeapprofdEndToEnd,
       MAYBE_SKIP(DiscoverByNameDenormalizedCmdline_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  DiscoverByNameDenormalizedCmdline();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(DiscoverByNameDenormalizedCmdline_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  DiscoverByNameDenormalizedCmdline();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(ReInit_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  ReInit();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(ReInit_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  ReInit();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(ConcurrentSession_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  ConcurrentSession();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(ConcurrentSession_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  ConcurrentSession();
}

TEST_F(HeapprofdEndToEnd,
       MAYBE_SKIP(NativeProfilingActiveAtProcessExit_Central)) {
  auto prop = DisableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "");
  NativeProfilingActiveAtProcessExit();
}

TEST_F(HeapprofdEndToEnd, MAYBE_SKIP(NativeProfilingActiveAtProcessExit_Fork)) {
  // RAII handle that resets to central mode when out of scope.
  auto prop = EnableFork();
  ASSERT_EQ(ReadProperty(kHeapprofdModeProperty, ""), "fork");
  NativeProfilingActiveAtProcessExit();
}

}  // namespace
}  // namespace profiling
}  // namespace perfetto