1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <atomic>
18 #include <string>
19 #include <vector>
20
21 #include <fcntl.h>
22 #include <stdint.h>
23 #include <string.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <unistd.h>
28 #include <optional>
29
30 #include "perfetto/base/build_config.h"
31 #include "perfetto/base/logging.h"
32 #include "perfetto/ext/base/file_utils.h"
33 #include "perfetto/ext/base/pipe.h"
34 #include "perfetto/ext/base/string_utils.h"
35 #include "perfetto/ext/base/subprocess.h"
36 #include "perfetto/heap_profile.h"
37 #include "perfetto/trace_processor/trace_processor.h"
38 #include "perfetto/tracing/default_socket.h"
39 #include "protos/perfetto/trace/trace.gen.h"
40 #include "protos/perfetto/trace/trace.pbzero.h"
41 #include "src/base/test/test_task_runner.h"
42 #include "src/profiling/memory/heapprofd_producer.h"
43 #include "test/gtest_and_gmock.h"
44 #include "test/test_helper.h"
45
46 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
47 #include <sys/system_properties.h>
48 #endif
49
50 #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
51 #include "protos/perfetto/trace/interned_data/interned_data.gen.h"
52 #include "protos/perfetto/trace/profiling/profile_common.gen.h"
53 #include "protos/perfetto/trace/profiling/profile_packet.gen.h"
54
55 namespace perfetto {
56 namespace profiling {
57 namespace {
58
59 constexpr useconds_t kMsToUs = 1000;
60
61 constexpr auto kTracingDisabledTimeoutMs = 30000;
62 constexpr auto kWaitForReadDataTimeoutMs = 10000;
63 constexpr size_t kStartupAllocSize = 10;
64 constexpr size_t kFirstIterationBytes = 5;
65 constexpr size_t kSecondIterationBytes = 7;
66
67 enum class TestMode { kCentral, kStatic };
68 enum class AllocatorMode { kMalloc, kCustom };
69
70 using ::testing::AllOf;
71 using ::testing::AnyOf;
72 using ::testing::Bool;
73 using ::testing::Contains;
74 using ::testing::Eq;
75 using ::testing::Field;
76 using ::testing::HasSubstr;
77 using ::testing::Values;
78
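// Trace-processor query used by GetFlamegraph() below: for each distinct
// (ts, upid) heap dump it materializes the native flamegraph via the
// experimental_flamegraph table function and orders the nodes by the
// magnitude of cumulative_size.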
79 constexpr const char* kOnlyFlamegraph = R"(
80 SELECT
81 id,
82 name,
83 map_name,
84 count,
85 cumulative_count,
86 size,
87 cumulative_size,
88 alloc_count,
89 cumulative_alloc_count,
90 alloc_size,
91 cumulative_alloc_size,
92 parent_id
93 FROM (SELECT distinct ts, upid from heap_profile_allocation) hpa
94 JOIN experimental_flamegraph(
95 'native',
96 hpa.ts,
97 NULL,
98 hpa.upid,
99 NULL,
100 NULL
101 )
102 order by abs(cumulative_size) desc;
103 )";
104
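// One row of the kOnlyFlamegraph result; the fields map 1:1, in the same
// order, onto the columns selected by the query above.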
105 struct FlamegraphNode {
106 int64_t id;
107 std::string name;
108 std::string map_name;
109 int64_t count;
110 int64_t cumulative_count;
111 int64_t size;
112 int64_t cumulative_size;
113 int64_t alloc_count;
114 int64_t cumulative_alloc_count;
115 int64_t alloc_size;
116 int64_t cumulative_alloc_size;
117 std::optional<int64_t> parent_id;
118 };
119
120 std::vector<FlamegraphNode> GetFlamegraph(trace_processor::TraceProcessor* tp) {
121 std::vector<FlamegraphNode> result;
122 auto it = tp->ExecuteQuery(kOnlyFlamegraph);
123 while (it.Next()) {
124 result.push_back({
125 it.Get(0).AsLong(),
126 it.Get(1).AsString(),
127 it.Get(2).AsString(),
128 it.Get(3).AsLong(),
129 it.Get(4).AsLong(),
130 it.Get(5).AsLong(),
131 it.Get(6).AsLong(),
132 it.Get(7).AsLong(),
133 it.Get(8).AsLong(),
134 it.Get(9).AsLong(),
135 it.Get(10).AsLong(),
136 it.Get(11).is_null() ? std::nullopt
137 : std::optional<int64_t>(it.Get(11).AsLong()),
138 });
139 }
140 PERFETTO_CHECK(it.Status().ok());
141 return result;
142 }
143
144 std::string AllocatorName(AllocatorMode mode) {
145 switch (mode) {
146 case AllocatorMode::kMalloc:
147 return "libc.malloc";
148 case AllocatorMode::kCustom:
149 return "test";
150 }
151 }
152
153 AllocatorMode AllocatorModeFromNameOrDie(std::string s) {
154 if (s == "libc.malloc")
155 return AllocatorMode::kMalloc;
156 if (s == "test")
157 return AllocatorMode::kCustom;
158   PERFETTO_FATAL("Invalid allocator mode [libc.malloc | test]: %s", s.c_str());
159 }
160
161 void ContinuousDump(HeapprofdConfig* cfg) {
162 auto* cont_config = cfg->mutable_continuous_dump_config();
163 cont_config->set_dump_phase_ms(0);
164 cont_config->set_dump_interval_ms(100);
165 }
166
167 template <typename F>
168 TraceConfig MakeTraceConfig(F fn) {
169 TraceConfig trace_config;
170 trace_config.add_buffers()->set_size_kb(10 * 1024);
171 trace_config.set_duration_ms(2000);
172 trace_config.set_data_source_stop_timeout_ms(10000);
173
174 auto* ds_config = trace_config.add_data_sources()->mutable_config();
175 ds_config->set_name("android.heapprofd");
176 ds_config->set_target_buffer(0);
177
178 protos::gen::HeapprofdConfig heapprofd_config;
179 fn(&heapprofd_config);
180 ds_config->set_heapprofd_config_raw(heapprofd_config.SerializeAsString());
181 return trace_config;
182 }
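
// Usage sketch (mirrors the tests below; the pid is hypothetical): the lambda
// fills in the heap-specific settings, which are serialized into the
// heapprofd_config_raw field of the "android.heapprofd" data source, e.g.
//   TraceConfig cfg = MakeTraceConfig([](HeapprofdConfig* c) {
//     c->set_sampling_interval_bytes(1);
//     c->add_pid(1234);
//     c->add_heaps("libc.malloc");
//   });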
183
184 void CustomAllocateAndFree(size_t bytes) {
185 static uint32_t heap_id = AHeapProfile_registerHeap(AHeapInfo_create("test"));
186 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
187 AHeapProfile_reportFree(heap_id, 0x1234abc);
188 }
189
190 void SecondaryAllocAndFree(size_t bytes) {
191 static uint32_t heap_id =
192 AHeapProfile_registerHeap(AHeapInfo_create("secondary"));
193 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
194 AHeapProfile_reportFree(heap_id, 0x1234abc);
195 }
196
197 void AllocateAndFree(size_t bytes) {
198 // This volatile is needed to prevent the compiler from trying to be
199 // helpful and compiling a "useless" malloc + free into a noop.
200 volatile char* x = static_cast<char*>(malloc(bytes));
201 if (x) {
202 if (bytes > 0)
203 x[0] = 'x';
204 free(const_cast<char*>(x));
205 }
206 }
207
208 void DoAllocation(AllocatorMode mode, size_t bytes) {
209 switch (mode) {
210 case AllocatorMode::kMalloc:
211 AllocateAndFree(bytes);
212 break;
213 case AllocatorMode::kCustom:
214       // We need to run at least one malloc even if we want to test the custom
215       // allocator, as the init mechanism assumes the application uses malloc.
216 AllocateAndFree(1);
217 CustomAllocateAndFree(bytes);
218 break;
219 }
220 }
221
222 void ContinuousMalloc(AllocatorMode mode,
223 size_t primary_bytes,
224 size_t secondary_bytes,
225 ssize_t max_iter = -1) {
226 for (ssize_t i = 0; max_iter == -1 || i < max_iter; ++i) {
227 DoAllocation(mode, primary_bytes);
228 if (secondary_bytes)
229 SecondaryAllocAndFree(secondary_bytes);
230 usleep(10 * kMsToUs);
231 }
232 }
233
234 void StartAndWaitForHandshake(base::Subprocess* child) {
235 // We cannot use base::Pipe because that assumes we want CLOEXEC.
236   // We do NOT want CLOEXEC, as the fd is used by the child after the exec.
237 int ready_pipe[2];
238 PERFETTO_CHECK(pipe(ready_pipe) == 0); // NOLINT(android-cloexec-pipe)
239
240 int ready_pipe_rd = ready_pipe[0];
241 int ready_pipe_wr = ready_pipe[1];
242 child->args.preserve_fds.push_back(ready_pipe_wr);
243 child->args.env.push_back("HEAPPROFD_TESTING_READY_PIPE=" +
244 std::to_string(ready_pipe_wr));
245 child->Start();
246 close(ready_pipe_wr);
247 // Wait for libc to initialize the signal handler. If we signal before the
248 // handler is installed, we can kill the process.
249 char buf[1];
250 PERFETTO_CHECK(PERFETTO_EINTR(read(ready_pipe_rd, buf, sizeof(buf))) == 0);
251 close(ready_pipe_rd);
252 }
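
// The child signals readiness by closing the inherited write end of the pipe
// (see ChildFinishHandshake() below), so the read() above returns 0 (EOF);
// that is why StartAndWaitForHandshake checks for == 0 rather than for a
// payload byte.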
253
254 void ChildFinishHandshake() {
255 const char* ready_pipe = getenv("HEAPPROFD_TESTING_READY_PIPE");
256 if (ready_pipe != nullptr) {
257 close(static_cast<int>(base::StringToInt64(ready_pipe).value()));
258 }
259 }
260
261 base::Subprocess ForkContinuousAlloc(AllocatorMode mode,
262 size_t primary_bytes,
263 size_t secondary_bytes = 0,
264 ssize_t max_iter = -1) {
265 base::Subprocess child({"/proc/self/exe"});
266 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
267 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
268 AllocatorName(mode));
269 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
270 std::to_string(primary_bytes));
271 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
272 std::to_string(secondary_bytes));
273 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
274 std::to_string(max_iter));
275
276 StartAndWaitForHandshake(&child);
277 return child;
278 }
279
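// The Run* functions below are declared __attribute__((constructor(1024))),
// so they run before main() in every process, including the children that the
// tests spawn by re-executing /proc/self/exe (see ForkContinuousAlloc()
// above). Each one checks its HEAPPROFD_TESTING_* environment variable and
// returns immediately if it is unset, so the parent gtest process is
// unaffected.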
280 void __attribute__((constructor(1024))) RunContinuousMalloc() {
281 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG0");
282 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG1");
283 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG2");
284 const char* a3 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG3");
285 if (a0 == nullptr)
286 return;
287
288 AllocatorMode arg0 = AllocatorModeFromNameOrDie(a0);
289 uint32_t arg1 = a1 ? base::StringToUInt32(a1).value() : 0;
290 uint32_t arg2 = a2 ? base::StringToUInt32(a2).value() : 0;
291 int32_t arg3 = a3 ? base::StringToInt32(a3).value() : -1;
292
293 ChildFinishHandshake();
294
295 ContinuousMalloc(arg0, arg1, arg2, arg3);
296 exit(0);
297 }
298
299 void __attribute__((constructor(1024))) RunAccurateMalloc() {
300 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC");
301 if (a0 == nullptr)
302 return;
303
304 static std::atomic<bool> initialized{false};
305 static uint32_t heap_id =
306 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
307 AHeapInfo_create("test"),
308 [](void*, const AHeapProfileEnableCallbackInfo*) {
309 initialized = true;
310 },
311 nullptr));
312
313 ChildFinishHandshake();
314
315 // heapprofd_client needs malloc to see the signal.
316 while (!initialized)
317 AllocateAndFree(1);
318 // We call the callback before setting enabled=true on the heap, so we
319 // wait a bit for the assignment to happen.
320 usleep(100000);
321 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
322 PERFETTO_FATAL("Expected allocation to be sampled.");
323 AHeapProfile_reportFree(heap_id, 0x1);
324 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
325 PERFETTO_FATAL("Expected allocation to be sampled.");
326 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
327 PERFETTO_FATAL("Expected allocation to be sampled.");
328 AHeapProfile_reportFree(heap_id, 0x2);
329
330   // Wait around so we can verify it didn't crash.
331 for (;;) {
332 // Call sleep, otherwise an empty busy loop is undefined behavior:
333 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
334 sleep(1);
335 }
336 }
337
338 void __attribute__((noreturn)) RunAccurateMallocWithVforkCommon() {
339 static std::atomic<bool> initialized{false};
340 static uint32_t heap_id =
341 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
342 AHeapInfo_create("test"),
343 [](void*, const AHeapProfileEnableCallbackInfo*) {
344 initialized = true;
345 },
346 nullptr));
347
348 ChildFinishHandshake();
349
350 // heapprofd_client needs malloc to see the signal.
351 while (!initialized)
352 AllocateAndFree(1);
353 // We call the callback before setting enabled=true on the heap, so we
354 // wait a bit for the assignment to happen.
355 usleep(100000);
356 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
357 PERFETTO_FATAL("Expected allocation to be sampled.");
358 AHeapProfile_reportFree(heap_id, 0x1);
359 pid_t pid = vfork();
360 PERFETTO_CHECK(pid != -1);
361 if (pid == 0) {
362 AHeapProfile_reportAllocation(heap_id, 0x2, 15u);
363 AHeapProfile_reportAllocation(heap_id, 0x3, 15u);
364 exit(0);
365 }
366 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
367 PERFETTO_FATAL("Expected allocation to be sampled.");
368 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
369 PERFETTO_FATAL("Expected allocation to be sampled.");
370 AHeapProfile_reportFree(heap_id, 0x2);
371
372   // Wait around so we can verify it didn't crash.
373 for (;;) {
374 // Call sleep, otherwise an empty busy loop is undefined behavior:
375 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
376 sleep(1);
377 }
378 }
379
380 void __attribute__((constructor(1024))) RunAccurateSample() {
381 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE");
382 if (a0 == nullptr)
383 return;
384
385 static std::atomic<bool> initialized{false};
386 static uint32_t heap_id =
387 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
388 AHeapInfo_create("test"),
389 [](void*, const AHeapProfileEnableCallbackInfo*) {
390 initialized = true;
391 },
392 nullptr));
393
394 ChildFinishHandshake();
395
396 // heapprofd_client needs malloc to see the signal.
397 while (!initialized)
398 AllocateAndFree(1);
399 // We call the callback before setting enabled=true on the heap, so we
400 // wait a bit for the assignment to happen.
401 usleep(100000);
402 if (!AHeapProfile_reportSample(heap_id, 0x1, 10u))
403 PERFETTO_FATAL("Expected allocation to be sampled.");
404 AHeapProfile_reportFree(heap_id, 0x1);
405 if (!AHeapProfile_reportSample(heap_id, 0x2, 15u))
406 PERFETTO_FATAL("Expected allocation to be sampled.");
407 if (!AHeapProfile_reportSample(heap_id, 0x3, 15u))
408 PERFETTO_FATAL("Expected allocation to be sampled.");
409 AHeapProfile_reportFree(heap_id, 0x2);
410
411   // Wait around so we can verify it didn't crash.
412 for (;;) {
413 // Call sleep, otherwise an empty busy loop is undefined behavior:
414 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
415 sleep(1);
416 }
417 }
418
419 void __attribute__((constructor(1024))) RunAccurateMallocWithVfork() {
420 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK");
421 if (a0 == nullptr)
422 return;
423 RunAccurateMallocWithVforkCommon();
424 }
425
426 void __attribute__((constructor(1024))) RunAccurateMallocWithVforkThread() {
427 const char* a0 =
428 getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD");
429 if (a0 == nullptr)
430 return;
431 std::thread th(RunAccurateMallocWithVforkCommon);
432 th.join();
433 }
434
435 void __attribute__((constructor(1024))) RunReInit() {
436 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG0");
437 if (a0 == nullptr)
438 return;
439
440 AllocatorMode mode = AllocatorModeFromNameOrDie(a0);
441 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG1");
442 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG2");
443 PERFETTO_CHECK(a1 != nullptr && a2 != nullptr);
444 int signal_pipe_rd = static_cast<int>(base::StringToInt64(a1).value());
445 int ack_pipe_wr = static_cast<int>(base::StringToInt64(a2).value());
446
447 ChildFinishHandshake();
448
449 size_t bytes = kFirstIterationBytes;
450 bool signalled = false;
451 for (;;) {
452 DoAllocation(mode, bytes);
453 char buf[1];
454 if (!signalled && read(signal_pipe_rd, buf, sizeof(buf)) == 1) {
455 signalled = true;
456 close(signal_pipe_rd);
457
458       // Make sure the client has noticed that the session has stopped.
459 DoAllocation(mode, bytes);
460
461 bytes = kSecondIterationBytes;
462 PERFETTO_CHECK(PERFETTO_EINTR(write(ack_pipe_wr, "1", 1)) == 1);
463 close(ack_pipe_wr);
464 }
465 usleep(10 * kMsToUs);
466 }
467 PERFETTO_FATAL("Should be unreachable");
468 }
469
470 void __attribute__((constructor(1024))) RunCustomLifetime() {
471 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0");
472 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1");
473 if (a0 == nullptr)
474 return;
475 uint64_t arg0 = a0 ? base::StringToUInt64(a0).value() : 0;
476   uint64_t arg1 = a1 ? base::StringToUInt64(a1).value() : 0;
477
478 PERFETTO_CHECK(arg1);
479
480 static std::atomic<bool> initialized{false};
481 static std::atomic<bool> disabled{false};
482 static std::atomic<uint64_t> sampling_interval;
483
484 static uint32_t other_heap_id = 0;
485 auto enabled_callback = [](void*,
486 const AHeapProfileEnableCallbackInfo* info) {
487 sampling_interval =
488 AHeapProfileEnableCallbackInfo_getSamplingInterval(info);
489 initialized = true;
490 };
491 auto disabled_callback = [](void*, const AHeapProfileDisableCallbackInfo*) {
492 PERFETTO_CHECK(other_heap_id);
493 AHeapProfile_reportFree(other_heap_id, 0);
494 disabled = true;
495 };
496 static uint32_t heap_id =
497 AHeapProfile_registerHeap(AHeapInfo_setDisabledCallback(
498 AHeapInfo_setEnabledCallback(AHeapInfo_create("test"),
499 enabled_callback, nullptr),
500 disabled_callback, nullptr));
501
502 other_heap_id = AHeapProfile_registerHeap(AHeapInfo_create("othertest"));
503 ChildFinishHandshake();
504
505 // heapprofd_client needs malloc to see the signal.
506 while (!initialized)
507 AllocateAndFree(1);
508
509 if (sampling_interval.load() != arg0) {
510 PERFETTO_FATAL("%" PRIu64 " != %" PRIu64, sampling_interval.load(), arg0);
511 }
512
513 while (!disabled)
514 AHeapProfile_reportFree(heap_id, 0x2);
515
516 char x = 'x';
517 PERFETTO_CHECK(base::WriteAll(static_cast<int>(arg1), &x, sizeof(x)) == 1);
518 close(static_cast<int>(arg1));
519
520 // Wait around so we can verify it didn't crash.
521 for (;;) {
522 // Call sleep, otherwise an empty busy loop is undefined behavior:
523 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
524 sleep(1);
525 }
526 }
527
528 class TraceProcessorTestHelper : public TestHelper {
529 public:
530   explicit TraceProcessorTestHelper(base::TestTaskRunner* task_runner)
531 : TestHelper(task_runner),
532 tp_(trace_processor::TraceProcessor::CreateInstance({})) {}
533
534   void ReadTraceData(std::vector<TracePacket> packets) override {
535 for (auto& packet : packets) {
536 auto preamble = packet.GetProtoPreamble();
537 std::string payload = packet.GetRawBytesForTesting();
538 char* preamble_payload = std::get<0>(preamble);
539 size_t preamble_size = std::get<1>(preamble);
540 size_t buf_size = preamble_size + payload.size();
541 std::unique_ptr<uint8_t[]> buf =
542 std::unique_ptr<uint8_t[]>(new uint8_t[buf_size]);
543 memcpy(&buf[0], preamble_payload, preamble_size);
544 memcpy(&buf[preamble_size], payload.data(), payload.size());
545 PERFETTO_CHECK(tp_->Parse(std::move(buf), buf_size).ok());
546 }
547 TestHelper::ReadTraceData(std::move(packets));
548 }
549
550   trace_processor::TraceProcessor& tp() { return *tp_; }
551
552 private:
553 std::unique_ptr<trace_processor::TraceProcessor> tp_;
554 };
555
556 std::unique_ptr<TraceProcessorTestHelper> GetHelper(
557 base::TestTaskRunner* task_runner) {
558 std::unique_ptr<TraceProcessorTestHelper> helper(
559 new TraceProcessorTestHelper(task_runner));
560 helper->StartServiceIfRequired();
561
562 helper->ConnectConsumer();
563 helper->WaitForConsumerConnect();
564 return helper;
565 }
566
567 void ReadAndWait(TraceProcessorTestHelper* helper) {
568 helper->WaitForTracingDisabled(kTracingDisabledTimeoutMs);
569 helper->ReadData();
570 helper->WaitForReadData(0, kWaitForReadDataTimeoutMs);
571 helper->tp().NotifyEndOfFile();
572 }
573
574 std::string ToTraceString(
575 const std::vector<protos::gen::TracePacket>& packets) {
576 protos::gen::Trace trace;
577 for (const protos::gen::TracePacket& packet : packets) {
578 *trace.add_packet() = packet;
579 }
580 return trace.SerializeAsString();
581 }
582
583 #define WRITE_TRACE(trace) \
584 do { \
585 WriteTrace(trace, __FILE__, __LINE__); \
586 } while (0)
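
// WRITE_TRACE(packets) saves the trace to $HEAPPROFD_TEST_PROFILE_OUT, tagged
// with the call site and the test-parameter suffix, and is a no-op when that
// environment variable is not set; see HeapprofdEndToEnd::WriteTrace below.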
587
588 std::string FormatHistogram(const protos::gen::ProfilePacket_Histogram& hist) {
589 std::string out;
590 std::string prev_upper_limit = "-inf";
591 for (const auto& bucket : hist.buckets()) {
592 std::string upper_limit;
593 if (bucket.max_bucket())
594 upper_limit = "inf";
595 else
596 upper_limit = std::to_string(bucket.upper_limit());
597
598 out += "[" + prev_upper_limit + ", " + upper_limit +
599 "]: " + std::to_string(bucket.count()) + "; ";
600 prev_upper_limit = std::move(upper_limit);
601 }
602 return out + "\n";
603 }
604
605 std::string FormatStats(const protos::gen::ProfilePacket_ProcessStats& stats) {
606 return std::string("unwinding_errors: ") +
607 std::to_string(stats.unwinding_errors()) + "\n" +
608 "heap_samples: " + std::to_string(stats.heap_samples()) + "\n" +
609 "map_reparses: " + std::to_string(stats.map_reparses()) + "\n" +
610 "unwinding_time_us: " + FormatHistogram(stats.unwinding_time_us());
611 }
612
613 std::string Suffix(const std::tuple<TestMode, AllocatorMode>& param) {
614 TestMode tm = std::get<0>(param);
615 AllocatorMode am = std::get<1>(param);
616
617 std::string result;
618 switch (tm) {
619 case TestMode::kCentral:
620 result += "CentralMode";
621 break;
622 case TestMode::kStatic:
623 result += "StaticMode";
624 break;
625 }
626 switch (am) {
627 case AllocatorMode::kMalloc:
628 result += "Malloc";
629 break;
630 case AllocatorMode::kCustom:
631 result += "Custom";
632 break;
633 }
634 return result;
635 }
636
637 __attribute__((unused)) std::string TestSuffix(
638 const ::testing::TestParamInfo<std::tuple<TestMode, AllocatorMode>>& info) {
639 return Suffix(info.param);
640 }
641
642 class HeapprofdEndToEnd
643 : public ::testing::TestWithParam<std::tuple<TestMode, AllocatorMode>> {
644 protected:
645 base::TestTaskRunner task_runner;
646
647   TestMode test_mode() { return std::get<0>(GetParam()); }
648   AllocatorMode allocator_mode() { return std::get<1>(GetParam()); }
649   std::string allocator_name() { return AllocatorName(allocator_mode()); }
650
651   void WriteTrace(const std::vector<protos::gen::TracePacket>& packets,
652 const char* filename,
653 uint64_t lineno) {
654 const char* outdir = getenv("HEAPPROFD_TEST_PROFILE_OUT");
655 if (!outdir)
656 return;
657 const std::string fq_filename =
658 std::string(outdir) + "/" + basename(filename) + ":" +
659 std::to_string(lineno) + "_" + Suffix(GetParam());
660 base::ScopedFile fd(base::OpenFile(fq_filename, O_WRONLY | O_CREAT, 0666));
661 PERFETTO_CHECK(*fd);
662 std::string trace_string = ToTraceString(packets);
663 PERFETTO_CHECK(
664 base::WriteAll(*fd, trace_string.data(), trace_string.size()) >= 0);
665 }
666
667   std::unique_ptr<TraceProcessorTestHelper> Trace(
668 const TraceConfig& trace_config) {
669 auto helper = GetHelper(&task_runner);
670
671 helper->StartTracing(trace_config);
672
673 ReadAndWait(helper.get());
674 return helper;
675 }
676
677   std::vector<std::string> GetUnwindingErrors(
678 TraceProcessorTestHelper* helper) {
679 std::vector<std::string> out;
680 const auto& packets = helper->trace();
681 for (const protos::gen::TracePacket& packet : packets) {
682 for (const protos::gen::InternedString& fn :
683 packet.interned_data().function_names()) {
684 if (fn.str().find("ERROR ") == 0) {
685 out.push_back(fn.str());
686 }
687 }
688 }
689 return out;
690 }
691
692   void PrintStats(TraceProcessorTestHelper* helper) {
693 const auto& packets = helper->trace();
694 for (const protos::gen::TracePacket& packet : packets) {
695 for (const auto& dump : packet.profile_packet().process_dumps()) {
696 // protobuf uint64 does not like the PRIu64 formatter.
697 PERFETTO_LOG("Stats for %s: %s", std::to_string(dump.pid()).c_str(),
698 FormatStats(dump.stats()).c_str());
699 }
700 }
701 std::vector<std::string> errors = GetUnwindingErrors(helper);
702 for (const std::string& err : errors) {
703 PERFETTO_LOG("Unwinding error: %s", err.c_str());
704 }
705 }
706
707   void ValidateSampleSizes(TraceProcessorTestHelper* helper,
708 uint64_t pid,
709 uint64_t alloc_size,
710 const std::string& heap_name = "") {
711 const auto& packets = helper->trace();
712 for (const protos::gen::TracePacket& packet : packets) {
713 for (const auto& dump : packet.profile_packet().process_dumps()) {
714 if (dump.pid() != pid ||
715 (!heap_name.empty() && heap_name != dump.heap_name())) {
716 continue;
717 }
718 for (const auto& sample : dump.samples()) {
719 EXPECT_EQ(sample.self_allocated() % alloc_size, 0u);
720 EXPECT_EQ(sample.self_freed() % alloc_size, 0u);
721 EXPECT_THAT(sample.self_allocated() - sample.self_freed(),
722 AnyOf(Eq(0u), Eq(alloc_size)));
723 }
724 }
725 }
726 }
727
728   void ValidateFromStartup(TraceProcessorTestHelper* helper,
729 uint64_t pid,
730 bool from_startup) {
731 const auto& packets = helper->trace();
732 for (const protos::gen::TracePacket& packet : packets) {
733 for (const auto& dump : packet.profile_packet().process_dumps()) {
734 if (dump.pid() != pid)
735 continue;
736 EXPECT_EQ(dump.from_startup(), from_startup);
737 }
738 }
739 }
740
741   void ValidateRejectedConcurrent(TraceProcessorTestHelper* helper,
742 uint64_t pid,
743 bool rejected_concurrent) {
744 const auto& packets = helper->trace();
745 for (const protos::gen::TracePacket& packet : packets) {
746 for (const auto& dump : packet.profile_packet().process_dumps()) {
747 if (dump.pid() != pid)
748 continue;
749 EXPECT_EQ(dump.rejected_concurrent(), rejected_concurrent);
750 }
751 }
752 }
753
754   void ValidateNoSamples(TraceProcessorTestHelper* helper, uint64_t pid) {
755 const auto& packets = helper->trace();
756 size_t samples = 0;
757 for (const protos::gen::TracePacket& packet : packets) {
758 for (const auto& dump : packet.profile_packet().process_dumps()) {
759 if (dump.pid() != pid)
760 continue;
761 samples += dump.samples().size();
762 }
763 }
764 EXPECT_EQ(samples, 0u);
765 }
766
767   void ValidateHasSamples(TraceProcessorTestHelper* helper,
768 uint64_t pid,
769 const std::string& heap_name,
770 uint64_t sampling_interval) {
771 const auto& packets = helper->trace();
772 ASSERT_GT(packets.size(), 0u);
773 size_t profile_packets = 0;
774 size_t samples = 0;
775 uint64_t last_allocated = 0;
776 uint64_t last_freed = 0;
777 for (const protos::gen::TracePacket& packet : packets) {
778 for (const auto& dump : packet.profile_packet().process_dumps()) {
779 if (dump.pid() != pid || dump.heap_name() != heap_name)
780 continue;
781 EXPECT_EQ(dump.sampling_interval_bytes(), sampling_interval);
782 for (const auto& sample : dump.samples()) {
783 last_allocated = sample.self_allocated();
784 last_freed = sample.self_freed();
785 samples++;
786 }
787 profile_packets++;
788 }
789 }
790 EXPECT_GT(profile_packets, 0u) << heap_name;
791 EXPECT_GT(samples, 0u) << heap_name;
792 EXPECT_GT(last_allocated, 0u) << heap_name;
793 EXPECT_GT(last_freed, 0u) << heap_name;
794 }
795
796   void ValidateOnlyPID(TraceProcessorTestHelper* helper, uint64_t pid) {
797 size_t dumps = 0;
798 const auto& packets = helper->trace();
799 for (const protos::gen::TracePacket& packet : packets) {
800 for (const auto& dump : packet.profile_packet().process_dumps()) {
801 EXPECT_EQ(dump.pid(), pid);
802 dumps++;
803 }
804 }
805 EXPECT_GT(dumps, 0u);
806 }
807 };
808
809 // This checks that the child is still running (to ensure it didn't crash
810 // unexpectedly) and then kills it.
811 void KillAssertRunning(base::Subprocess* child) {
812 ASSERT_EQ(child->Poll(), base::Subprocess::kRunning)
813 << "Target process not running. CHECK CRASH LOGS.";
814 PERFETTO_LOG("Shutting down profile target.");
815 child->KillAndWaitForTermination();
816 }
817
818 TEST_P(HeapprofdEndToEnd, Disabled) {
819 constexpr size_t kAllocSize = 1024;
820
821 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
822 const uint64_t pid = static_cast<uint64_t>(child.pid());
823
824 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
825 cfg->set_sampling_interval_bytes(1);
826 cfg->add_pid(pid);
827 cfg->add_heaps("invalid");
828 ContinuousDump(cfg);
829 });
830
831 auto helper = Trace(trace_config);
832 WRITE_TRACE(helper->full_trace());
833 PrintStats(helper.get());
834 KillAssertRunning(&child);
835
836 ValidateNoSamples(helper.get(), pid);
837 }
838
839 TEST_P(HeapprofdEndToEnd, Smoke) {
840 constexpr size_t kAllocSize = 1024;
841 constexpr size_t kSamplingInterval = 1;
842
843 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
844 const uint64_t pid = static_cast<uint64_t>(child.pid());
845
846 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
847 cfg->set_sampling_interval_bytes(kSamplingInterval);
848 cfg->add_pid(pid);
849 cfg->add_heaps(allocator_name());
850 ContinuousDump(cfg);
851 });
852
853 auto helper = Trace(trace_config);
854 WRITE_TRACE(helper->full_trace());
855 PrintStats(helper.get());
856 KillAssertRunning(&child);
857
858 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
859 ValidateOnlyPID(helper.get(), pid);
860 ValidateSampleSizes(helper.get(), pid, kAllocSize);
861 }
862
863 TEST_P(HeapprofdEndToEnd, TwoAllocators) {
864 constexpr size_t kCustomAllocSize = 1024;
865 constexpr size_t kAllocSize = 7;
866 constexpr size_t kSamplingInterval = 1;
867
868 base::Subprocess child =
869 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
870 const uint64_t pid = static_cast<uint64_t>(child.pid());
871
872 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
873 cfg->set_sampling_interval_bytes(kSamplingInterval);
874 cfg->add_pid(pid);
875 cfg->add_heaps(allocator_name());
876 cfg->add_heaps("secondary");
877 ContinuousDump(cfg);
878 });
879
880 auto helper = Trace(trace_config);
881 WRITE_TRACE(helper->full_trace());
882 PrintStats(helper.get());
883 KillAssertRunning(&child);
884
885 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
886 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
887 ValidateOnlyPID(helper.get(), pid);
888 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
889 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
890 }
891
892 TEST_P(HeapprofdEndToEnd, TwoAllocatorsAll) {
893 constexpr size_t kCustomAllocSize = 1024;
894 constexpr size_t kAllocSize = 7;
895 constexpr size_t kSamplingInterval = 1;
896
897 base::Subprocess child =
898 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
899 const uint64_t pid = static_cast<uint64_t>(child.pid());
900
901 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
902 cfg->set_sampling_interval_bytes(kSamplingInterval);
903 cfg->add_pid(pid);
904 cfg->set_all_heaps(true);
905 ContinuousDump(cfg);
906 });
907
908 auto helper = Trace(trace_config);
909 WRITE_TRACE(helper->full_trace());
910 PrintStats(helper.get());
911 KillAssertRunning(&child);
912
913 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
914 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
915 ValidateOnlyPID(helper.get(), pid);
916 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
917 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
918 }
919
920 TEST_P(HeapprofdEndToEnd, AccurateCustomReportAllocation) {
921 if (allocator_mode() != AllocatorMode::kCustom)
922 GTEST_SKIP();
923
924 base::Subprocess child({"/proc/self/exe"});
925 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
926 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
927 StartAndWaitForHandshake(&child);
928
929 const uint64_t pid = static_cast<uint64_t>(child.pid());
930
931 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
932 cfg->set_sampling_interval_bytes(1);
933 cfg->add_pid(pid);
934 cfg->add_heaps("test");
935 });
936
937 auto helper = Trace(trace_config);
938 WRITE_TRACE(helper->full_trace());
939 PrintStats(helper.get());
940 KillAssertRunning(&child);
941
942 auto flamegraph = GetFlamegraph(&helper->tp());
943 EXPECT_THAT(flamegraph,
944 Contains(AllOf(
945 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
946 Field(&FlamegraphNode::cumulative_size, Eq(15)),
947 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
948
949 ValidateOnlyPID(helper.get(), pid);
950
951 size_t total_alloc = 0;
952 size_t total_freed = 0;
953 for (const protos::gen::TracePacket& packet : helper->trace()) {
954 for (const auto& dump : packet.profile_packet().process_dumps()) {
955 for (const auto& sample : dump.samples()) {
956 total_alloc += sample.self_allocated();
957 total_freed += sample.self_freed();
958 }
959 }
960 }
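  // RunAccurateMalloc reports allocations of 10 + 15 + 15 bytes (= 40) and
  // frees the 10-byte and one 15-byte allocation (= 25).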
961 EXPECT_EQ(total_alloc, 40u);
962 EXPECT_EQ(total_freed, 25u);
963 }
964
965 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
966 #define MAYBE_AccurateCustomReportAllocationWithVfork \
967 AccurateCustomReportAllocationWithVfork
968 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
969 AccurateCustomReportAllocationWithVforkThread
970 #else
971 #define MAYBE_AccurateCustomReportAllocationWithVfork \
972 DISABLED_AccurateCustomReportAllocationWithVfork
973 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
974 DISABLED_AccurateCustomReportAllocationWithVforkThread
975 #endif
976
977 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVfork) {
978 if (allocator_mode() != AllocatorMode::kCustom)
979 GTEST_SKIP();
980
981 base::Subprocess child({"/proc/self/exe"});
982 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
983 child.args.env.push_back(
984 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK=1");
985 StartAndWaitForHandshake(&child);
986
987 const uint64_t pid = static_cast<uint64_t>(child.pid());
988
989 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
990 cfg->set_sampling_interval_bytes(1);
991 cfg->add_pid(pid);
992 cfg->add_heaps("test");
993 });
994
995 auto helper = Trace(trace_config);
996 WRITE_TRACE(helper->full_trace());
997 PrintStats(helper.get());
998 KillAssertRunning(&child);
999
1000 auto flamegraph = GetFlamegraph(&helper->tp());
1001 EXPECT_THAT(flamegraph,
1002 Contains(AllOf(
1003 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1004 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1005 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1006
1007 ValidateOnlyPID(helper.get(), pid);
1008
1009 size_t total_alloc = 0;
1010 size_t total_freed = 0;
1011 for (const protos::gen::TracePacket& packet : helper->trace()) {
1012 for (const auto& dump : packet.profile_packet().process_dumps()) {
1013 EXPECT_FALSE(dump.disconnected());
1014 for (const auto& sample : dump.samples()) {
1015 total_alloc += sample.self_allocated();
1016 total_freed += sample.self_freed();
1017 }
1018 }
1019 }
1020 EXPECT_EQ(total_alloc, 40u);
1021 EXPECT_EQ(total_freed, 25u);
1022 }
1023
1024 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVforkThread) {
1025 if (allocator_mode() != AllocatorMode::kCustom)
1026 GTEST_SKIP();
1027
1028 base::Subprocess child({"/proc/self/exe"});
1029 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1030 child.args.env.push_back(
1031 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD=1");
1032 StartAndWaitForHandshake(&child);
1033
1034 const uint64_t pid = static_cast<uint64_t>(child.pid());
1035
1036 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1037 cfg->set_sampling_interval_bytes(1);
1038 cfg->add_pid(pid);
1039 cfg->add_heaps("test");
1040 });
1041
1042 auto helper = Trace(trace_config);
1043 WRITE_TRACE(helper->full_trace());
1044 PrintStats(helper.get());
1045 KillAssertRunning(&child);
1046
1047 auto flamegraph = GetFlamegraph(&helper->tp());
1048 EXPECT_THAT(flamegraph,
1049 Contains(AllOf(
1050 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1051 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1052 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1053
1054 ValidateOnlyPID(helper.get(), pid);
1055
1056 size_t total_alloc = 0;
1057 size_t total_freed = 0;
1058 for (const protos::gen::TracePacket& packet : helper->trace()) {
1059 for (const auto& dump : packet.profile_packet().process_dumps()) {
1060 EXPECT_FALSE(dump.disconnected());
1061 for (const auto& sample : dump.samples()) {
1062 total_alloc += sample.self_allocated();
1063 total_freed += sample.self_freed();
1064 }
1065 }
1066 }
1067 EXPECT_EQ(total_alloc, 40u);
1068 EXPECT_EQ(total_freed, 25u);
1069 }
1070
1071 TEST_P(HeapprofdEndToEnd, AccurateCustomReportSample) {
1072 if (allocator_mode() != AllocatorMode::kCustom)
1073 GTEST_SKIP();
1074
1075 base::Subprocess child({"/proc/self/exe"});
1076 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1077 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE=1");
1078 StartAndWaitForHandshake(&child);
1079
1080 const uint64_t pid = static_cast<uint64_t>(child.pid());
1081
1082 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1083 cfg->set_sampling_interval_bytes(1000000);
1084 cfg->add_pid(pid);
1085 cfg->add_heaps("test");
1086 });
1087
1088 auto helper = Trace(trace_config);
1089 WRITE_TRACE(helper->full_trace());
1090 PrintStats(helper.get());
1091 KillAssertRunning(&child);
1092
1093 ValidateOnlyPID(helper.get(), pid);
1094
1095 size_t total_alloc = 0;
1096 size_t total_freed = 0;
1097 for (const protos::gen::TracePacket& packet : helper->trace()) {
1098 for (const auto& dump : packet.profile_packet().process_dumps()) {
1099 for (const auto& sample : dump.samples()) {
1100 total_alloc += sample.self_allocated();
1101 total_freed += sample.self_freed();
1102 }
1103 }
1104 }
1105 EXPECT_EQ(total_alloc, 40u);
1106 EXPECT_EQ(total_freed, 25u);
1107 }
1108
1109 TEST_P(HeapprofdEndToEnd, AccurateDumpAtMaxCustom) {
1110 if (allocator_mode() != AllocatorMode::kCustom)
1111 GTEST_SKIP();
1112
1113 base::Subprocess child({"/proc/self/exe"});
1114 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1115 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
1116 StartAndWaitForHandshake(&child);
1117
1118 const uint64_t pid = static_cast<uint64_t>(child.pid());
1119
1120 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1121 cfg->set_sampling_interval_bytes(1);
1122 cfg->add_pid(pid);
1123 cfg->add_heaps("test");
1124 cfg->set_dump_at_max(true);
1125 });
1126
1127 auto helper = Trace(trace_config);
1128 WRITE_TRACE(helper->full_trace());
1129 PrintStats(helper.get());
1130 KillAssertRunning(&child);
1131
1132 ValidateOnlyPID(helper.get(), pid);
1133
1134 size_t total_alloc = 0;
1135 size_t total_count = 0;
1136 for (const protos::gen::TracePacket& packet : helper->trace()) {
1137 for (const auto& dump : packet.profile_packet().process_dumps()) {
1138 for (const auto& sample : dump.samples()) {
1139 total_alloc += sample.self_max();
1140 total_count += sample.self_max_count();
1141 }
1142 }
1143 }
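  // With dump_at_max, the peak is reached when the two 15-byte allocations
  // (0x2 and 0x3) are live at the same time: self_max 30 across 2 allocations.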
1144 EXPECT_EQ(total_alloc, 30u);
1145 EXPECT_EQ(total_count, 2u);
1146 }
1147
1148 TEST_P(HeapprofdEndToEnd, CustomLifetime) {
1149 if (allocator_mode() != AllocatorMode::kCustom)
1150 GTEST_SKIP();
1151
1152 int disabled_pipe[2];
1153 PERFETTO_CHECK(pipe(disabled_pipe) == 0); // NOLINT(android-cloexec-pipe)
1154
1155 int disabled_pipe_rd = disabled_pipe[0];
1156 int disabled_pipe_wr = disabled_pipe[1];
1157
1158 base::Subprocess child({"/proc/self/exe"});
1159 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1160 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0=1000000");
1161 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1=" +
1162 std::to_string(disabled_pipe_wr));
1163 child.args.preserve_fds.push_back(disabled_pipe_wr);
1164 StartAndWaitForHandshake(&child);
1165 close(disabled_pipe_wr);
1166
1167 const uint64_t pid = static_cast<uint64_t>(child.pid());
1168
1169 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1170 cfg->set_sampling_interval_bytes(1000000);
1171 cfg->add_pid(pid);
1172 cfg->add_heaps("test");
1173 cfg->add_heaps("othertest");
1174 });
1175
1176 auto helper = Trace(trace_config);
1177 WRITE_TRACE(helper->full_trace());
1178 PrintStats(helper.get());
1179 // Give client some time to notice the disconnect.
1180 sleep(2);
1181 KillAssertRunning(&child);
1182
1183 char x;
1184 EXPECT_EQ(base::Read(disabled_pipe_rd, &x, sizeof(x)), 1);
1185 close(disabled_pipe_rd);
1186 }
1187
1188 TEST_P(HeapprofdEndToEnd, TwoProcesses) {
1189 constexpr size_t kAllocSize = 1024;
1190 constexpr size_t kAllocSize2 = 7;
1191 constexpr size_t kSamplingInterval = 1;
1192
1193 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1194 base::Subprocess child2 = ForkContinuousAlloc(allocator_mode(), kAllocSize2);
1195 const uint64_t pid = static_cast<uint64_t>(child.pid());
1196 const auto pid2 = child2.pid();
1197
1198 TraceConfig trace_config =
1199 MakeTraceConfig([this, pid, pid2](HeapprofdConfig* cfg) {
1200 cfg->set_sampling_interval_bytes(kSamplingInterval);
1201 cfg->add_pid(pid);
1202 cfg->add_pid(static_cast<uint64_t>(pid2));
1203 cfg->add_heaps(allocator_name());
1204 });
1205
1206 auto helper = Trace(trace_config);
1207 WRITE_TRACE(helper->full_trace());
1208 PrintStats(helper.get());
1209
1210 KillAssertRunning(&child);
1211 KillAssertRunning(&child2);
1212
1213 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1214 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1215 ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid2),
1216 allocator_name(), kSamplingInterval);
1217 ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid2), kAllocSize2);
1218 }
1219
1220 TEST_P(HeapprofdEndToEnd, FinalFlush) {
1221 constexpr size_t kAllocSize = 1024;
1222 constexpr size_t kSamplingInterval = 1;
1223
1224 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1225 const uint64_t pid = static_cast<uint64_t>(child.pid());
1226 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1227 cfg->set_sampling_interval_bytes(kSamplingInterval);
1228 cfg->add_pid(pid);
1229 cfg->add_heaps(allocator_name());
1230 });
1231
1232 auto helper = Trace(trace_config);
1233 WRITE_TRACE(helper->full_trace());
1234 PrintStats(helper.get());
1235 KillAssertRunning(&child);
1236
1237 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1238 ValidateOnlyPID(helper.get(), pid);
1239 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1240 }
1241
1242 TEST_P(HeapprofdEndToEnd, NativeStartup) {
1243 if (test_mode() == TestMode::kStatic)
1244 GTEST_SKIP();
1245
1246 auto helper = GetHelper(&task_runner);
1247
1248 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1249 cfg->set_sampling_interval_bytes(1);
1250 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1251 cfg->add_heaps(allocator_name());
1252 });
1253 trace_config.set_duration_ms(5000);
1254
1255 helper->StartTracing(trace_config);
1256
1257 // Wait to guarantee that the process forked below is hooked by the profiler
1258 // by virtue of the startup check, and not by virtue of being seen as a
1259   // running process. This sleep is here to prevent the test from accidentally
1260   // getting to the fork()+exec() too soon, before the heap profiling daemon
1261   // has received the trace config.
1262 sleep(1);
1263
1264 base::Subprocess child({"/proc/self/exe"});
1265 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1266 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1267 allocator_name());
1268 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1269 std::to_string(kStartupAllocSize));
1270 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1271 std::string("0"));
1272 StartAndWaitForHandshake(&child);
1273
1274 ReadAndWait(helper.get());
1275 WRITE_TRACE(helper->full_trace());
1276
1277 KillAssertRunning(&child);
1278
1279 const auto& packets = helper->trace();
1280 ASSERT_GT(packets.size(), 0u);
1281 size_t profile_packets = 0;
1282 size_t samples = 0;
1283 uint64_t total_allocated = 0;
1284 uint64_t total_freed = 0;
1285 for (const protos::gen::TracePacket& packet : packets) {
1286 if (packet.has_profile_packet() &&
1287 !packet.profile_packet().process_dumps().empty()) {
1288 const auto& dumps = packet.profile_packet().process_dumps();
1289 ASSERT_EQ(dumps.size(), 1u);
1290 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1291 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1292 profile_packets++;
1293 for (const auto& sample : dump.samples()) {
1294 samples++;
1295 total_allocated += sample.self_allocated();
1296 total_freed += sample.self_freed();
1297 }
1298 }
1299 }
1300 EXPECT_EQ(profile_packets, 1u);
1301 EXPECT_GT(samples, 0u);
1302 EXPECT_GT(total_allocated, 0u);
1303 EXPECT_GT(total_freed, 0u);
1304 }
1305
1306 TEST_P(HeapprofdEndToEnd, NativeStartupDenormalizedCmdline) {
1307 if (test_mode() == TestMode::kStatic)
1308 GTEST_SKIP();
1309
1310 auto helper = GetHelper(&task_runner);
1311
1312 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1313 cfg->set_sampling_interval_bytes(1);
1314 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1315 cfg->add_heaps(allocator_name());
1316 });
1317 trace_config.set_duration_ms(5000);
1318
1319 helper->StartTracing(trace_config);
1320
1321 // Wait to guarantee that the process forked below is hooked by the profiler
1322 // by virtue of the startup check, and not by virtue of being seen as a
1323   // running process. This sleep is here to prevent the test from accidentally
1324   // getting to the fork()+exec() too soon, before the heap profiling daemon
1325   // has received the trace config.
1326 sleep(1);
1327
1328 base::Subprocess child({"/proc/self/exe"});
1329 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1330 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1331 allocator_name());
1332 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1333 std::to_string(kStartupAllocSize));
1334 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1335 std::string("0"));
1336
1337 StartAndWaitForHandshake(&child);
1338
1339 ReadAndWait(helper.get());
1340 WRITE_TRACE(helper->full_trace());
1341
1342 KillAssertRunning(&child);
1343
1344 const auto& packets = helper->trace();
1345 ASSERT_GT(packets.size(), 0u);
1346 size_t profile_packets = 0;
1347 size_t samples = 0;
1348 uint64_t total_allocated = 0;
1349 uint64_t total_freed = 0;
1350 for (const protos::gen::TracePacket& packet : packets) {
1351 if (packet.has_profile_packet() &&
1352 !packet.profile_packet().process_dumps().empty()) {
1353 const auto& dumps = packet.profile_packet().process_dumps();
1354 ASSERT_EQ(dumps.size(), 1u);
1355 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1356 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1357 profile_packets++;
1358 for (const auto& sample : dump.samples()) {
1359 samples++;
1360 total_allocated += sample.self_allocated();
1361 total_freed += sample.self_freed();
1362 }
1363 }
1364 }
1365 EXPECT_EQ(profile_packets, 1u);
1366 EXPECT_GT(samples, 0u);
1367 EXPECT_GT(total_allocated, 0u);
1368 EXPECT_GT(total_freed, 0u);
1369 }
1370
1371 TEST_P(HeapprofdEndToEnd, DiscoverByName) {
1372 auto helper = GetHelper(&task_runner);
1373
1374 base::Subprocess child({"/proc/self/exe"});
1375 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1376 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1377 allocator_name());
1378 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1379 std::to_string(kStartupAllocSize));
1380 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1381 std::string("0"));
1382
1383 StartAndWaitForHandshake(&child);
1384
1385   // Wait to make sure the process is fully initialized, so we do not
1386   // accidentally match it by the startup logic.
1387 sleep(1);
1388
1389 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1390 cfg->set_sampling_interval_bytes(1);
1391 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1392 cfg->add_heaps(allocator_name());
1393 });
1394 trace_config.set_duration_ms(5000);
1395
1396 helper->StartTracing(trace_config);
1397 ReadAndWait(helper.get());
1398 WRITE_TRACE(helper->full_trace());
1399
1400 KillAssertRunning(&child);
1401
1402 const auto& packets = helper->trace();
1403 ASSERT_GT(packets.size(), 0u);
1404 size_t profile_packets = 0;
1405 size_t samples = 0;
1406 uint64_t total_allocated = 0;
1407 uint64_t total_freed = 0;
1408 for (const protos::gen::TracePacket& packet : packets) {
1409 if (packet.has_profile_packet() &&
1410 !packet.profile_packet().process_dumps().empty()) {
1411 const auto& dumps = packet.profile_packet().process_dumps();
1412 ASSERT_EQ(dumps.size(), 1u);
1413 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1414 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1415 profile_packets++;
1416 for (const auto& sample : dump.samples()) {
1417 samples++;
1418 total_allocated += sample.self_allocated();
1419 total_freed += sample.self_freed();
1420 }
1421 }
1422 }
1423 EXPECT_EQ(profile_packets, 1u);
1424 EXPECT_GT(samples, 0u);
1425 EXPECT_GT(total_allocated, 0u);
1426 EXPECT_GT(total_freed, 0u);
1427 }
1428
1429 TEST_P(HeapprofdEndToEnd, DiscoverByNameDenormalizedCmdline) {
1430 auto helper = GetHelper(&task_runner);
1431
1432 // Make sure the forked process does not get reparented to init.
1433 base::Subprocess child({"/proc/self/exe"});
1434 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1435 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1436 allocator_name());
1437 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1438 std::to_string(kStartupAllocSize));
1439 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1440 std::string("0"));
1441
1442 StartAndWaitForHandshake(&child);
1443
1444   // Wait to make sure the process is fully initialized, so we do not
1445   // accidentally match it by the startup logic.
1446 sleep(1);
1447
1448 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1449 cfg->set_sampling_interval_bytes(1);
1450 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1451 cfg->add_heaps(allocator_name());
1452 });
1453 trace_config.set_duration_ms(5000);
1454
1455 helper->StartTracing(trace_config);
1456 ReadAndWait(helper.get());
1457 WRITE_TRACE(helper->full_trace());
1458
1459 KillAssertRunning(&child);
1460
1461 const auto& packets = helper->trace();
1462 ASSERT_GT(packets.size(), 0u);
1463 size_t profile_packets = 0;
1464 size_t samples = 0;
1465 uint64_t total_allocated = 0;
1466 uint64_t total_freed = 0;
1467 for (const protos::gen::TracePacket& packet : packets) {
1468 if (packet.has_profile_packet() &&
1469 !packet.profile_packet().process_dumps().empty()) {
1470 const auto& dumps = packet.profile_packet().process_dumps();
1471 ASSERT_EQ(dumps.size(), 1u);
1472 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1473 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1474 profile_packets++;
1475 for (const auto& sample : dump.samples()) {
1476 samples++;
1477 total_allocated += sample.self_allocated();
1478 total_freed += sample.self_freed();
1479 }
1480 }
1481 }
1482 EXPECT_EQ(profile_packets, 1u);
1483 EXPECT_GT(samples, 0u);
1484 EXPECT_GT(total_allocated, 0u);
1485 EXPECT_GT(total_freed, 0u);
1486 }
1487
1488 TEST_P(HeapprofdEndToEnd, ReInit) {
1489 constexpr size_t kSamplingInterval = 1;
1490
1491 // We cannot use base::Pipe because that assumes we want CLOEXEC.
1492 // We do NOT want CLOEXEC as this gets used by the RunReInit in the child.
1493 int signal_pipe[2];
1494 int ack_pipe[2];
1495
1496 PERFETTO_CHECK(pipe(signal_pipe) == 0); // NOLINT(android-cloexec-pipe)
1497 PERFETTO_CHECK(pipe(ack_pipe) == 0); // NOLINT(android-cloexec-pipe)
1498
1499 int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
1500 PERFETTO_CHECK(cur_flags >= 0);
1501 PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1502 cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
1503 PERFETTO_CHECK(cur_flags >= 0);
1504 PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1505
1506 int signal_pipe_rd = signal_pipe[0];
1507 int signal_pipe_wr = signal_pipe[1];
1508 int ack_pipe_rd = ack_pipe[0];
1509 int ack_pipe_wr = ack_pipe[1];
1510
1511 base::Subprocess child({"/proc/self/exe"});
1512 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1513 child.args.preserve_fds.push_back(signal_pipe_rd);
1514 child.args.preserve_fds.push_back(ack_pipe_wr);
1515 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
1516 allocator_name());
1517 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
1518 std::to_string(signal_pipe_rd));
1519 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
1520 std::to_string(ack_pipe_wr));
1521 StartAndWaitForHandshake(&child);
1522
1523 const uint64_t pid = static_cast<uint64_t>(child.pid());
1524
1525 close(signal_pipe_rd);
1526 close(ack_pipe_wr);
1527
1528 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1529 cfg->set_sampling_interval_bytes(kSamplingInterval);
1530 cfg->add_pid(pid);
1531 cfg->add_heaps(allocator_name());
1532 });
1533
1534 auto helper = Trace(trace_config);
1535 WRITE_TRACE(helper->full_trace());
1536
1537 PrintStats(helper.get());
1538 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1539 ValidateOnlyPID(helper.get(), pid);
1540 ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);
1541
1542 PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
1543 close(signal_pipe_wr);
1544 char buf[1];
1545 ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
1546 close(ack_pipe_rd);
1547
1548   // Sleep briefly so the client notices that the profiling session is being
1549   // torn down (the client rejects concurrent sessions).
1550 usleep(500 * kMsToUs);
1551
1552   PERFETTO_LOG("HeapprofdEndToEnd::ReInit: Starting second");
1553
1554   // We must keep the original helper alive because it owns the service thread.
1555 std::unique_ptr<TraceProcessorTestHelper> helper2 =
1556 std::unique_ptr<TraceProcessorTestHelper>(
1557 new TraceProcessorTestHelper(&task_runner));
1558
1559 helper2->ConnectConsumer();
1560 helper2->WaitForConsumerConnect();
1561 helper2->StartTracing(trace_config);
1562 ReadAndWait(helper2.get());
1563 WRITE_TRACE(helper2->trace());
1564
1565 PrintStats(helper2.get());
1566 KillAssertRunning(&child);
1567
1568 ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
1569 ValidateOnlyPID(helper2.get(), pid);
1570 ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
1571 }
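
// For reference, a minimal sketch of the child-side loop that the signal/ack
// protocol above assumes. The real RunReInit entrypoint lives elsewhere in
// this test binary; the shape below is illustrative only:
//
//   size_t alloc_size = kFirstIterationBytes;
//   for (;;) {
//     free(malloc(alloc_size));           // sampled (and freed) allocation
//     char c;
//     if (read(signal_fd, &c, 1) == 1) {  // non-blocking signal pipe
//       alloc_size = kSecondIterationBytes;
//       PERFETTO_CHECK(write(ack_fd, "1", 1) == 1);
//     }
//     usleep(10 * kMsToUs);
//   }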
1572
1573 TEST_P(HeapprofdEndToEnd, ReInitAfterInvalid) {
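  // This test uses the same pipe setup and signal/ack flow as ReInit above.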
1574 constexpr size_t kSamplingInterval = 1;
1575
1576   // We cannot use base::Pipe because that assumes we want CLOEXEC. We do
1577   // NOT want CLOEXEC: these fds are used by RunReInit in the child after exec.
1578 int signal_pipe[2];
1579 int ack_pipe[2];
1580
1581 PERFETTO_CHECK(pipe(signal_pipe) == 0); // NOLINT(android-cloexec-pipe)
1582 PERFETTO_CHECK(pipe(ack_pipe) == 0); // NOLINT(android-cloexec-pipe)
1583
1584 int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
1585 PERFETTO_CHECK(cur_flags >= 0);
1586 PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1587 cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
1588 PERFETTO_CHECK(cur_flags >= 0);
1589 PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);
1590
1591 int signal_pipe_rd = signal_pipe[0];
1592 int signal_pipe_wr = signal_pipe[1];
1593 int ack_pipe_rd = ack_pipe[0];
1594 int ack_pipe_wr = ack_pipe[1];
1595
1596 base::Subprocess child({"/proc/self/exe"});
1597 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1598 child.args.preserve_fds.push_back(signal_pipe_rd);
1599 child.args.preserve_fds.push_back(ack_pipe_wr);
1600 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
1601 allocator_name());
1602 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
1603 std::to_string(signal_pipe_rd));
1604 child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
1605 std::to_string(ack_pipe_wr));
1606 StartAndWaitForHandshake(&child);
1607
1608 const uint64_t pid = static_cast<uint64_t>(child.pid());
1609
1610 close(signal_pipe_rd);
1611 close(ack_pipe_wr);
1612
1613 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1614 cfg->set_sampling_interval_bytes(kSamplingInterval);
1615 cfg->add_pid(pid);
1616 cfg->add_heaps(allocator_name());
1617 });
1618
1619 auto helper = Trace(trace_config);
1620 WRITE_TRACE(helper->full_trace());
1621
1622 PrintStats(helper.get());
1623 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1624 ValidateOnlyPID(helper.get(), pid);
1625 ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);
1626
1627 PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
1628 close(signal_pipe_wr);
1629 char buf[1];
1630 ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
1631 close(ack_pipe_rd);
1632
1633   // Sleep briefly so the client notices that the profiling session is being
1634   // torn down (the client rejects concurrent sessions).
1635 usleep(500 * kMsToUs);
1636
1637   PERFETTO_LOG("HeapprofdEndToEnd::ReInitAfterInvalid: Starting second");
1638
1639   // We must keep the original helper alive because it owns the service thread.
1640 std::unique_ptr<TraceProcessorTestHelper> helper2 =
1641 std::unique_ptr<TraceProcessorTestHelper>(
1642 new TraceProcessorTestHelper(&task_runner));
1643
1644 helper2->ConnectConsumer();
1645 helper2->WaitForConsumerConnect();
1646 helper2->StartTracing(trace_config);
1647 ReadAndWait(helper2.get());
1648
1649 WRITE_TRACE(helper2->trace());
1650
1651 PrintStats(helper2.get());
1652 KillAssertRunning(&child);
1653
1654 ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
1655 ValidateOnlyPID(helper2.get(), pid);
1656 ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
1657 }
1658
1659 TEST_P(HeapprofdEndToEnd, ConcurrentSession) {
1660 constexpr size_t kAllocSize = 1024;
1661 constexpr size_t kSamplingInterval = 1;
1662
1663 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1664 const uint64_t pid = static_cast<uint64_t>(child.pid());
1665
1666 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1667 cfg->set_sampling_interval_bytes(kSamplingInterval);
1668 cfg->add_pid(pid);
1669 cfg->add_heaps(allocator_name());
1670 ContinuousDump(cfg);
1671 });
1672 trace_config.set_duration_ms(5000);
1673
1674 auto helper = GetHelper(&task_runner);
1675 helper->StartTracing(trace_config);
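  // Give the first session a one-second head start so it is established
  // before the concurrent session below is attempted.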
1676 sleep(1);
1677
1678 PERFETTO_LOG("Starting concurrent.");
1679 std::unique_ptr<TraceProcessorTestHelper> helper_concurrent(
1680 new TraceProcessorTestHelper(&task_runner));
1681 helper_concurrent->ConnectConsumer();
1682 helper_concurrent->WaitForConsumerConnect();
1683 helper_concurrent->StartTracing(trace_config);
1684
1685 ReadAndWait(helper.get());
1686 WRITE_TRACE(helper->full_trace());
1687 PrintStats(helper.get());
1688
1689 ReadAndWait(helper_concurrent.get());
1690 WRITE_TRACE(helper_concurrent->trace());
1691 PrintStats(helper_concurrent.get());
1692 KillAssertRunning(&child);
1693
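  // The first session should contain real samples for the child, while the
  // concurrent session is expected to have been rejected by the client.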
1694 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1695 ValidateOnlyPID(helper.get(), pid);
1696 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1697 ValidateRejectedConcurrent(helper.get(), pid, false);
1698
1699 ValidateOnlyPID(helper_concurrent.get(), pid);
1700 ValidateRejectedConcurrent(helper_concurrent.get(), pid, true);
1701 }
1702
1703 TEST_P(HeapprofdEndToEnd, NativeProfilingActiveAtProcessExit) {
1704 constexpr uint64_t kTestAllocSize = 128;
1705 base::Pipe start_pipe = base::Pipe::Create(base::Pipe::kBothBlock);
1706 int start_pipe_wr = *start_pipe.wr;
1707
1708 base::Subprocess child({"/proc/self/exe"});
1709 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
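  // The RUN_MALLOC arguments select the heap and allocation size; the trailing
  // arguments (presumably iteration/delay parameters) bound the child's run so
  // that it exits on its own while tracing is still active.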
1710 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1711 allocator_name());
1712 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1713 std::to_string(kTestAllocSize));
1714 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1715 std::to_string(0));
1716 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
1717 std::to_string(200));
1718 child.args.preserve_fds.push_back(start_pipe_wr);
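  // The entrypoint runs in the forked child just before exec: the write to the
  // start pipe lets the parent know the child has actually started running.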
1719 child.args.posix_entrypoint_for_testing = [start_pipe_wr] {
1720 PERFETTO_CHECK(PERFETTO_EINTR(write(start_pipe_wr, "1", 1)) == 1);
1721 PERFETTO_CHECK(close(start_pipe_wr) == 0 || errno == EINTR);
1722 };
1723
1724 StartAndWaitForHandshake(&child);
1725
1726 const uint64_t pid = static_cast<uint64_t>(child.pid());
1727 start_pipe.wr.reset();
1728
1729 // Construct tracing config (without starting profiling).
1730 auto helper = GetHelper(&task_runner);
1731
1732 // Wait for child to have been scheduled at least once.
1733 char buf[1] = {};
1734 ASSERT_EQ(PERFETTO_EINTR(read(*start_pipe.rd, buf, sizeof(buf))), 1);
1735 start_pipe.rd.reset();
1736
1737 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1738 cfg->set_sampling_interval_bytes(1);
1739 cfg->add_pid(pid);
1740 cfg->add_heaps(allocator_name());
1741 });
1742 trace_config.set_duration_ms(5000);
1743
1744 // Trace until child exits.
1745 helper->StartTracing(trace_config);
1746
1747 // Wait for the child and assert that it exited successfully.
1748 EXPECT_TRUE(child.Wait(30000));
1749 EXPECT_EQ(child.status(), base::Subprocess::kTerminated);
1750 EXPECT_EQ(child.returncode(), 0);
1751
1752 // Assert that we did profile the process.
1753 helper->FlushAndWait(2000);
1754 helper->DisableTracing();
1755 ReadAndWait(helper.get());
1756 WRITE_TRACE(helper->full_trace());
1757
1758 const auto& packets = helper->trace();
1759 ASSERT_GT(packets.size(), 0u);
1760 size_t profile_packets = 0;
1761 size_t samples = 0;
1762 uint64_t total_allocated = 0;
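  // Tally the samples attributed to the now-exited child. A single dump is
  // expected, as the config requests no continuous dumps.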
1763 for (const protos::gen::TracePacket& packet : packets) {
1764 if (packet.has_profile_packet() &&
1765 !packet.profile_packet().process_dumps().empty()) {
1766 const auto& dumps = packet.profile_packet().process_dumps();
1767 ASSERT_EQ(dumps.size(), 1u);
1768 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1769 EXPECT_EQ(dump.pid(), pid);
1770 profile_packets++;
1771 for (const auto& sample : dump.samples()) {
1772 samples++;
1773 total_allocated += sample.self_allocated();
1774 }
1775 }
1776 }
1777 EXPECT_EQ(profile_packets, 1u);
1778 EXPECT_GT(samples, 0u);
1779 EXPECT_GT(total_allocated, 0u);
1780 }
1781
1782 // On in-tree Android, we use the system heapprofd in fork or central mode.
1783 // For Linux and out-of-tree Android, we statically include a copy of heapprofd
1784 // and use that; the statically linked copy does not support intercepting malloc.
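// Accordingly, the static (non-Android) build below is instantiated only with
// the custom allocator, while the central-mode Android build is instantiated
// with both the malloc and custom heaps.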
1785 #if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
1786 #if !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
1787 #error "Need to start daemons for Linux test."
1788 #endif
1789
1790 INSTANTIATE_TEST_SUITE_P(Run,
1791 HeapprofdEndToEnd,
1792 Values(std::make_tuple(TestMode::kStatic,
1793 AllocatorMode::kCustom)),
1794 TestSuffix);
1795 #elif !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
1796 INSTANTIATE_TEST_SUITE_P(
1797 Run,
1798 HeapprofdEndToEnd,
1799 Values(std::make_tuple(TestMode::kCentral, AllocatorMode::kMalloc),
1800 std::make_tuple(TestMode::kCentral, AllocatorMode::kCustom)),
1801 TestSuffix);
1802 #endif
1803
1804 } // namespace
1805 } // namespace profiling
1806 } // namespace perfetto
1807