1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <atomic>
18 #include <string>
19 #include <vector>
20
21 #include <fcntl.h>
22 #include <stdint.h>
23 #include <string.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <sys/wait.h>
27 #include <unistd.h>
28 #include <optional>
29
30 #include "perfetto/base/build_config.h"
31 #include "perfetto/base/logging.h"
32 #include "perfetto/ext/base/file_utils.h"
33 #include "perfetto/ext/base/pipe.h"
34 #include "perfetto/ext/base/string_utils.h"
35 #include "perfetto/ext/base/subprocess.h"
36 #include "perfetto/ext/tracing/ipc/default_socket.h"
37 #include "perfetto/heap_profile.h"
38 #include "perfetto/trace_processor/trace_processor.h"
39 #include "protos/perfetto/trace/trace.gen.h"
40 #include "protos/perfetto/trace/trace.pbzero.h"
41 #include "src/base/test/test_task_runner.h"
42 #include "src/profiling/memory/heapprofd_producer.h"
43 #include "test/gtest_and_gmock.h"
44 #include "test/test_helper.h"
45
46 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
47 #include <sys/system_properties.h>
48 #endif
49
50 #include "protos/perfetto/config/profiling/heapprofd_config.gen.h"
51 #include "protos/perfetto/trace/interned_data/interned_data.gen.h"
52 #include "protos/perfetto/trace/profiling/profile_common.gen.h"
53 #include "protos/perfetto/trace/profiling/profile_packet.gen.h"
54
55 namespace perfetto {
56 namespace profiling {
57 namespace {
58
59 constexpr useconds_t kMsToUs = 1000;
60
61 constexpr auto kTracingDisabledTimeoutMs = 30000;
62 constexpr auto kWaitForReadDataTimeoutMs = 10000;
63 constexpr size_t kStartupAllocSize = 10;
64 constexpr size_t kFirstIterationBytes = 5;
65 constexpr size_t kSecondIterationBytes = 7;
66
67 enum class TestMode { kCentral, kStatic };
68 enum class AllocatorMode { kMalloc, kCustom };
69
70 using ::testing::AllOf;
71 using ::testing::AnyOf;
72 using ::testing::Bool;
73 using ::testing::Contains;
74 using ::testing::Eq;
75 using ::testing::Field;
76 using ::testing::HasSubstr;
77 using ::testing::Values;
78
79 constexpr const char* kOnlyFlamegraph =
80 "SELECT id, name, map_name, count, cumulative_count, size, "
81 "cumulative_size, "
82 "alloc_count, cumulative_alloc_count, alloc_size, cumulative_alloc_size, "
83 "parent_id "
84 "FROM experimental_flamegraph WHERE "
85 "(ts, upid) IN (SELECT distinct ts, upid from heap_profile_allocation) AND "
86 "profile_type = 'native' order by abs(cumulative_size) desc;";
87
88 struct FlamegraphNode {
89 int64_t id;
90 std::string name;
91 std::string map_name;
92 int64_t count;
93 int64_t cumulative_count;
94 int64_t size;
95 int64_t cumulative_size;
96 int64_t alloc_count;
97 int64_t cumulative_alloc_count;
98 int64_t alloc_size;
99 int64_t cumulative_alloc_size;
100 std::optional<int64_t> parent_id;
101 };
102
GetFlamegraph(trace_processor::TraceProcessor * tp)103 std::vector<FlamegraphNode> GetFlamegraph(trace_processor::TraceProcessor* tp) {
104 std::vector<FlamegraphNode> result;
105 auto it = tp->ExecuteQuery(kOnlyFlamegraph);
106 while (it.Next()) {
107 result.push_back({
108 it.Get(0).AsLong(),
109 it.Get(1).AsString(),
110 it.Get(2).AsString(),
111 it.Get(3).AsLong(),
112 it.Get(4).AsLong(),
113 it.Get(5).AsLong(),
114 it.Get(6).AsLong(),
115 it.Get(7).AsLong(),
116 it.Get(8).AsLong(),
117 it.Get(9).AsLong(),
118 it.Get(10).AsLong(),
119 it.Get(11).is_null() ? std::nullopt
120 : std::optional<int64_t>(it.Get(11).AsLong()),
121 });
122 }
123 PERFETTO_CHECK(it.Status().ok());
124 return result;
125 }
126
AllocatorName(AllocatorMode mode)127 std::string AllocatorName(AllocatorMode mode) {
128 switch (mode) {
129 case AllocatorMode::kMalloc:
130 return "libc.malloc";
131 case AllocatorMode::kCustom:
132 return "test";
133 }
134 }
135
AllocatorModeFromNameOrDie(std::string s)136 AllocatorMode AllocatorModeFromNameOrDie(std::string s) {
137 if (s == "libc.malloc")
138 return AllocatorMode::kMalloc;
139 if (s == "test")
140 return AllocatorMode::kCustom;
141 PERFETTO_FATAL("Invalid allocator mode [malloc | test]: %s", s.c_str());
142 }
143
ContinuousDump(HeapprofdConfig * cfg)144 void ContinuousDump(HeapprofdConfig* cfg) {
145 auto* cont_config = cfg->mutable_continuous_dump_config();
146 cont_config->set_dump_phase_ms(0);
147 cont_config->set_dump_interval_ms(100);
148 }
149
150 template <typename F>
MakeTraceConfig(F fn)151 TraceConfig MakeTraceConfig(F fn) {
152 TraceConfig trace_config;
153 trace_config.add_buffers()->set_size_kb(10 * 1024);
154 trace_config.set_duration_ms(2000);
155 trace_config.set_data_source_stop_timeout_ms(10000);
156
157 auto* ds_config = trace_config.add_data_sources()->mutable_config();
158 ds_config->set_name("android.heapprofd");
159 ds_config->set_target_buffer(0);
160
161 protos::gen::HeapprofdConfig heapprofd_config;
162 fn(&heapprofd_config);
163 ds_config->set_heapprofd_config_raw(heapprofd_config.SerializeAsString());
164 return trace_config;
165 }
166
CustomAllocateAndFree(size_t bytes)167 void CustomAllocateAndFree(size_t bytes) {
168 static uint32_t heap_id = AHeapProfile_registerHeap(AHeapInfo_create("test"));
169 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
170 AHeapProfile_reportFree(heap_id, 0x1234abc);
171 }
172
SecondaryAllocAndFree(size_t bytes)173 void SecondaryAllocAndFree(size_t bytes) {
174 static uint32_t heap_id =
175 AHeapProfile_registerHeap(AHeapInfo_create("secondary"));
176 AHeapProfile_reportAllocation(heap_id, 0x1234abc, bytes);
177 AHeapProfile_reportFree(heap_id, 0x1234abc);
178 }
179
AllocateAndFree(size_t bytes)180 void AllocateAndFree(size_t bytes) {
181 // This volatile is needed to prevent the compiler from trying to be
182 // helpful and compiling a "useless" malloc + free into a noop.
183 volatile char* x = static_cast<char*>(malloc(bytes));
184 if (x) {
185 if (bytes > 0)
186 x[0] = 'x';
187 free(const_cast<char*>(x));
188 }
189 }
190
DoAllocation(AllocatorMode mode,size_t bytes)191 void DoAllocation(AllocatorMode mode, size_t bytes) {
192 switch (mode) {
193 case AllocatorMode::kMalloc:
194 AllocateAndFree(bytes);
195 break;
196 case AllocatorMode::kCustom:
197 // We need to run malloc(0) even if we want to test the custom allocator,
198 // as the init mechanism assumes the application uses malloc.
199 AllocateAndFree(1);
200 CustomAllocateAndFree(bytes);
201 break;
202 }
203 }
204
ContinuousMalloc(AllocatorMode mode,size_t primary_bytes,size_t secondary_bytes,ssize_t max_iter=-1)205 void ContinuousMalloc(AllocatorMode mode,
206 size_t primary_bytes,
207 size_t secondary_bytes,
208 ssize_t max_iter = -1) {
209 for (ssize_t i = 0; max_iter == -1 || i < max_iter; ++i) {
210 DoAllocation(mode, primary_bytes);
211 if (secondary_bytes)
212 SecondaryAllocAndFree(secondary_bytes);
213 usleep(10 * kMsToUs);
214 }
215 }
216
StartAndWaitForHandshake(base::Subprocess * child)217 void StartAndWaitForHandshake(base::Subprocess* child) {
218 // We cannot use base::Pipe because that assumes we want CLOEXEC.
219 // We do NOT want CLOEXEC as this gets used by the RunReInit in the child.
220 int ready_pipe[2];
221 PERFETTO_CHECK(pipe(ready_pipe) == 0); // NOLINT(android-cloexec-pipe)
222
223 int ready_pipe_rd = ready_pipe[0];
224 int ready_pipe_wr = ready_pipe[1];
225 child->args.preserve_fds.push_back(ready_pipe_wr);
226 child->args.env.push_back("HEAPPROFD_TESTING_READY_PIPE=" +
227 std::to_string(ready_pipe_wr));
228 child->Start();
229 close(ready_pipe_wr);
230 // Wait for libc to initialize the signal handler. If we signal before the
231 // handler is installed, we can kill the process.
232 char buf[1];
233 PERFETTO_CHECK(PERFETTO_EINTR(read(ready_pipe_rd, buf, sizeof(buf))) == 0);
234 close(ready_pipe_rd);
235 }
236
ChildFinishHandshake()237 void ChildFinishHandshake() {
238 const char* ready_pipe = getenv("HEAPPROFD_TESTING_READY_PIPE");
239 if (ready_pipe != nullptr) {
240 close(static_cast<int>(base::StringToInt64(ready_pipe).value()));
241 }
242 }
243
ForkContinuousAlloc(AllocatorMode mode,size_t primary_bytes,size_t secondary_bytes=0,ssize_t max_iter=-1)244 base::Subprocess ForkContinuousAlloc(AllocatorMode mode,
245 size_t primary_bytes,
246 size_t secondary_bytes = 0,
247 ssize_t max_iter = -1) {
248 base::Subprocess child({"/proc/self/exe"});
249 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
250 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
251 AllocatorName(mode));
252 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
253 std::to_string(primary_bytes));
254 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
255 std::to_string(secondary_bytes));
256 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
257 std::to_string(max_iter));
258
259 StartAndWaitForHandshake(&child);
260 return child;
261 }
262
RunContinuousMalloc()263 void __attribute__((constructor(1024))) RunContinuousMalloc() {
264 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG0");
265 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG1");
266 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG2");
267 const char* a3 = getenv("HEAPPROFD_TESTING_RUN_MALLOC_ARG3");
268 if (a0 == nullptr)
269 return;
270
271 AllocatorMode arg0 = AllocatorModeFromNameOrDie(a0);
272 uint32_t arg1 = a1 ? base::StringToUInt32(a1).value() : 0;
273 uint32_t arg2 = a2 ? base::StringToUInt32(a2).value() : 0;
274 int32_t arg3 = a3 ? base::StringToInt32(a3).value() : -1;
275
276 ChildFinishHandshake();
277
278 ContinuousMalloc(arg0, arg1, arg2, arg3);
279 exit(0);
280 }
281
RunAccurateMalloc()282 void __attribute__((constructor(1024))) RunAccurateMalloc() {
283 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC");
284 if (a0 == nullptr)
285 return;
286
287 static std::atomic<bool> initialized{false};
288 static uint32_t heap_id =
289 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
290 AHeapInfo_create("test"),
291 [](void*, const AHeapProfileEnableCallbackInfo*) {
292 initialized = true;
293 },
294 nullptr));
295
296 ChildFinishHandshake();
297
298 // heapprofd_client needs malloc to see the signal.
299 while (!initialized)
300 AllocateAndFree(1);
301 // We call the callback before setting enabled=true on the heap, so we
302 // wait a bit for the assignment to happen.
303 usleep(100000);
304 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
305 PERFETTO_FATAL("Expected allocation to be sampled.");
306 AHeapProfile_reportFree(heap_id, 0x1);
307 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
308 PERFETTO_FATAL("Expected allocation to be sampled.");
309 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
310 PERFETTO_FATAL("Expected allocation to be sampled.");
311 AHeapProfile_reportFree(heap_id, 0x2);
312
313 // Wait around so we can verify it did't crash.
314 for (;;) {
315 // Call sleep, otherwise an empty busy loop is undefined behavior:
316 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
317 sleep(1);
318 }
319 }
320
RunAccurateMallocWithVforkCommon()321 void __attribute__((noreturn)) RunAccurateMallocWithVforkCommon() {
322 static std::atomic<bool> initialized{false};
323 static uint32_t heap_id =
324 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
325 AHeapInfo_create("test"),
326 [](void*, const AHeapProfileEnableCallbackInfo*) {
327 initialized = true;
328 },
329 nullptr));
330
331 ChildFinishHandshake();
332
333 // heapprofd_client needs malloc to see the signal.
334 while (!initialized)
335 AllocateAndFree(1);
336 // We call the callback before setting enabled=true on the heap, so we
337 // wait a bit for the assignment to happen.
338 usleep(100000);
339 if (!AHeapProfile_reportAllocation(heap_id, 0x1, 10u))
340 PERFETTO_FATAL("Expected allocation to be sampled.");
341 AHeapProfile_reportFree(heap_id, 0x1);
342 pid_t pid = vfork();
343 PERFETTO_CHECK(pid != -1);
344 if (pid == 0) {
345 AHeapProfile_reportAllocation(heap_id, 0x2, 15u);
346 AHeapProfile_reportAllocation(heap_id, 0x3, 15u);
347 exit(0);
348 }
349 if (!AHeapProfile_reportAllocation(heap_id, 0x2, 15u))
350 PERFETTO_FATAL("Expected allocation to be sampled.");
351 if (!AHeapProfile_reportAllocation(heap_id, 0x3, 15u))
352 PERFETTO_FATAL("Expected allocation to be sampled.");
353 AHeapProfile_reportFree(heap_id, 0x2);
354
355 // Wait around so we can verify it did't crash.
356 for (;;) {
357 // Call sleep, otherwise an empty busy loop is undefined behavior:
358 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
359 sleep(1);
360 }
361 }
362
RunAccurateSample()363 void __attribute__((constructor(1024))) RunAccurateSample() {
364 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE");
365 if (a0 == nullptr)
366 return;
367
368 static std::atomic<bool> initialized{false};
369 static uint32_t heap_id =
370 AHeapProfile_registerHeap(AHeapInfo_setEnabledCallback(
371 AHeapInfo_create("test"),
372 [](void*, const AHeapProfileEnableCallbackInfo*) {
373 initialized = true;
374 },
375 nullptr));
376
377 ChildFinishHandshake();
378
379 // heapprofd_client needs malloc to see the signal.
380 while (!initialized)
381 AllocateAndFree(1);
382 // We call the callback before setting enabled=true on the heap, so we
383 // wait a bit for the assignment to happen.
384 usleep(100000);
385 if (!AHeapProfile_reportSample(heap_id, 0x1, 10u))
386 PERFETTO_FATAL("Expected allocation to be sampled.");
387 AHeapProfile_reportFree(heap_id, 0x1);
388 if (!AHeapProfile_reportSample(heap_id, 0x2, 15u))
389 PERFETTO_FATAL("Expected allocation to be sampled.");
390 if (!AHeapProfile_reportSample(heap_id, 0x3, 15u))
391 PERFETTO_FATAL("Expected allocation to be sampled.");
392 AHeapProfile_reportFree(heap_id, 0x2);
393
394 // Wait around so we can verify it did't crash.
395 for (;;) {
396 // Call sleep, otherwise an empty busy loop is undefined behavior:
397 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
398 sleep(1);
399 }
400 }
401
RunAccurateMallocWithVfork()402 void __attribute__((constructor(1024))) RunAccurateMallocWithVfork() {
403 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK");
404 if (a0 == nullptr)
405 return;
406 RunAccurateMallocWithVforkCommon();
407 }
408
RunAccurateMallocWithVforkThread()409 void __attribute__((constructor(1024))) RunAccurateMallocWithVforkThread() {
410 const char* a0 =
411 getenv("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD");
412 if (a0 == nullptr)
413 return;
414 std::thread th(RunAccurateMallocWithVforkCommon);
415 th.join();
416 }
417
RunReInit()418 void __attribute__((constructor(1024))) RunReInit() {
419 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG0");
420 if (a0 == nullptr)
421 return;
422
423 AllocatorMode mode = AllocatorModeFromNameOrDie(a0);
424 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG1");
425 const char* a2 = getenv("HEAPPROFD_TESTING_RUN_REINIT_ARG2");
426 PERFETTO_CHECK(a1 != nullptr && a2 != nullptr);
427 int signal_pipe_rd = static_cast<int>(base::StringToInt64(a1).value());
428 int ack_pipe_wr = static_cast<int>(base::StringToInt64(a2).value());
429
430 ChildFinishHandshake();
431
432 size_t bytes = kFirstIterationBytes;
433 bool signalled = false;
434 for (;;) {
435 DoAllocation(mode, bytes);
436 char buf[1];
437 if (!signalled && read(signal_pipe_rd, buf, sizeof(buf)) == 1) {
438 signalled = true;
439 close(signal_pipe_rd);
440
441 // make sure the client has noticed that the session has stopped
442 DoAllocation(mode, bytes);
443
444 bytes = kSecondIterationBytes;
445 PERFETTO_CHECK(PERFETTO_EINTR(write(ack_pipe_wr, "1", 1)) == 1);
446 close(ack_pipe_wr);
447 }
448 usleep(10 * kMsToUs);
449 }
450 PERFETTO_FATAL("Should be unreachable");
451 }
452
RunCustomLifetime()453 void __attribute__((constructor(1024))) RunCustomLifetime() {
454 const char* a0 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0");
455 const char* a1 = getenv("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1");
456 if (a0 == nullptr)
457 return;
458 uint64_t arg0 = a0 ? base::StringToUInt64(a0).value() : 0;
459 uint64_t arg1 = a0 ? base::StringToUInt64(a1).value() : 0;
460
461 PERFETTO_CHECK(arg1);
462
463 static std::atomic<bool> initialized{false};
464 static std::atomic<bool> disabled{false};
465 static std::atomic<uint64_t> sampling_interval;
466
467 static uint32_t other_heap_id = 0;
468 auto enabled_callback = [](void*,
469 const AHeapProfileEnableCallbackInfo* info) {
470 sampling_interval =
471 AHeapProfileEnableCallbackInfo_getSamplingInterval(info);
472 initialized = true;
473 };
474 auto disabled_callback = [](void*, const AHeapProfileDisableCallbackInfo*) {
475 PERFETTO_CHECK(other_heap_id);
476 AHeapProfile_reportFree(other_heap_id, 0);
477 disabled = true;
478 };
479 static uint32_t heap_id =
480 AHeapProfile_registerHeap(AHeapInfo_setDisabledCallback(
481 AHeapInfo_setEnabledCallback(AHeapInfo_create("test"),
482 enabled_callback, nullptr),
483 disabled_callback, nullptr));
484
485 other_heap_id = AHeapProfile_registerHeap(AHeapInfo_create("othertest"));
486 ChildFinishHandshake();
487
488 // heapprofd_client needs malloc to see the signal.
489 while (!initialized)
490 AllocateAndFree(1);
491
492 if (sampling_interval.load() != arg0) {
493 PERFETTO_FATAL("%" PRIu64 " != %" PRIu64, sampling_interval.load(), arg0);
494 }
495
496 while (!disabled)
497 AHeapProfile_reportFree(heap_id, 0x2);
498
499 char x = 'x';
500 PERFETTO_CHECK(base::WriteAll(static_cast<int>(arg1), &x, sizeof(x)) == 1);
501 close(static_cast<int>(arg1));
502
503 // Wait around so we can verify it didn't crash.
504 for (;;) {
505 // Call sleep, otherwise an empty busy loop is undefined behavior:
506 // http://en.cppreference.com/w/cpp/language/memory_model#Progress_guarantee
507 sleep(1);
508 }
509 }
510
511 class TraceProcessorTestHelper : public TestHelper {
512 public:
TraceProcessorTestHelper(base::TestTaskRunner * task_runner)513 explicit TraceProcessorTestHelper(base::TestTaskRunner* task_runner)
514 : TestHelper(task_runner),
515 tp_(trace_processor::TraceProcessor::CreateInstance({})) {}
516
ReadTraceData(std::vector<TracePacket> packets)517 void ReadTraceData(std::vector<TracePacket> packets) override {
518 for (auto& packet : packets) {
519 auto preamble = packet.GetProtoPreamble();
520 std::string payload = packet.GetRawBytesForTesting();
521 char* preamble_payload = std::get<0>(preamble);
522 size_t preamble_size = std::get<1>(preamble);
523 size_t buf_size = preamble_size + payload.size();
524 std::unique_ptr<uint8_t[]> buf =
525 std::unique_ptr<uint8_t[]>(new uint8_t[buf_size]);
526 memcpy(&buf[0], preamble_payload, preamble_size);
527 memcpy(&buf[preamble_size], payload.data(), payload.size());
528 PERFETTO_CHECK(tp_->Parse(std::move(buf), buf_size).ok());
529 }
530 TestHelper::ReadTraceData(std::move(packets));
531 }
532
tp()533 trace_processor::TraceProcessor& tp() { return *tp_; }
534
535 private:
536 std::unique_ptr<trace_processor::TraceProcessor> tp_;
537 };
538
GetHelper(base::TestTaskRunner * task_runner)539 std::unique_ptr<TraceProcessorTestHelper> GetHelper(
540 base::TestTaskRunner* task_runner) {
541 std::unique_ptr<TraceProcessorTestHelper> helper(
542 new TraceProcessorTestHelper(task_runner));
543 helper->StartServiceIfRequired();
544
545 helper->ConnectConsumer();
546 helper->WaitForConsumerConnect();
547 return helper;
548 }
549
ReadAndWait(TraceProcessorTestHelper * helper)550 void ReadAndWait(TraceProcessorTestHelper* helper) {
551 helper->WaitForTracingDisabled(kTracingDisabledTimeoutMs);
552 helper->ReadData();
553 helper->WaitForReadData(0, kWaitForReadDataTimeoutMs);
554 helper->tp().NotifyEndOfFile();
555 }
556
ToTraceString(const std::vector<protos::gen::TracePacket> & packets)557 std::string ToTraceString(
558 const std::vector<protos::gen::TracePacket>& packets) {
559 protos::gen::Trace trace;
560 for (const protos::gen::TracePacket& packet : packets) {
561 *trace.add_packet() = packet;
562 }
563 return trace.SerializeAsString();
564 }
565
566 #define WRITE_TRACE(trace) \
567 do { \
568 WriteTrace(trace, __FILE__, __LINE__); \
569 } while (0)
570
FormatHistogram(const protos::gen::ProfilePacket_Histogram & hist)571 std::string FormatHistogram(const protos::gen::ProfilePacket_Histogram& hist) {
572 std::string out;
573 std::string prev_upper_limit = "-inf";
574 for (const auto& bucket : hist.buckets()) {
575 std::string upper_limit;
576 if (bucket.max_bucket())
577 upper_limit = "inf";
578 else
579 upper_limit = std::to_string(bucket.upper_limit());
580
581 out += "[" + prev_upper_limit + ", " + upper_limit +
582 "]: " + std::to_string(bucket.count()) + "; ";
583 prev_upper_limit = std::move(upper_limit);
584 }
585 return out + "\n";
586 }
587
FormatStats(const protos::gen::ProfilePacket_ProcessStats & stats)588 std::string FormatStats(const protos::gen::ProfilePacket_ProcessStats& stats) {
589 return std::string("unwinding_errors: ") +
590 std::to_string(stats.unwinding_errors()) + "\n" +
591 "heap_samples: " + std::to_string(stats.heap_samples()) + "\n" +
592 "map_reparses: " + std::to_string(stats.map_reparses()) + "\n" +
593 "unwinding_time_us: " + FormatHistogram(stats.unwinding_time_us());
594 }
595
Suffix(const std::tuple<TestMode,AllocatorMode> & param)596 std::string Suffix(const std::tuple<TestMode, AllocatorMode>& param) {
597 TestMode tm = std::get<0>(param);
598 AllocatorMode am = std::get<1>(param);
599
600 std::string result;
601 switch (tm) {
602 case TestMode::kCentral:
603 result += "CentralMode";
604 break;
605 case TestMode::kStatic:
606 result += "StaticMode";
607 break;
608 }
609 switch (am) {
610 case AllocatorMode::kMalloc:
611 result += "Malloc";
612 break;
613 case AllocatorMode::kCustom:
614 result += "Custom";
615 break;
616 }
617 return result;
618 }
619
TestSuffix(const::testing::TestParamInfo<std::tuple<TestMode,AllocatorMode>> & info)620 __attribute__((unused)) std::string TestSuffix(
621 const ::testing::TestParamInfo<std::tuple<TestMode, AllocatorMode>>& info) {
622 return Suffix(info.param);
623 }
624
625 class HeapprofdEndToEnd
626 : public ::testing::TestWithParam<std::tuple<TestMode, AllocatorMode>> {
627 protected:
628 base::TestTaskRunner task_runner;
629
test_mode()630 TestMode test_mode() { return std::get<0>(GetParam()); }
allocator_mode()631 AllocatorMode allocator_mode() { return std::get<1>(GetParam()); }
allocator_name()632 std::string allocator_name() { return AllocatorName(allocator_mode()); }
633
WriteTrace(const std::vector<protos::gen::TracePacket> & packets,const char * filename,uint64_t lineno)634 void WriteTrace(const std::vector<protos::gen::TracePacket>& packets,
635 const char* filename,
636 uint64_t lineno) {
637 const char* outdir = getenv("HEAPPROFD_TEST_PROFILE_OUT");
638 if (!outdir)
639 return;
640 const std::string fq_filename =
641 std::string(outdir) + "/" + basename(filename) + ":" +
642 std::to_string(lineno) + "_" + Suffix(GetParam());
643 base::ScopedFile fd(base::OpenFile(fq_filename, O_WRONLY | O_CREAT, 0666));
644 PERFETTO_CHECK(*fd);
645 std::string trace_string = ToTraceString(packets);
646 PERFETTO_CHECK(
647 base::WriteAll(*fd, trace_string.data(), trace_string.size()) >= 0);
648 }
649
Trace(const TraceConfig & trace_config)650 std::unique_ptr<TraceProcessorTestHelper> Trace(
651 const TraceConfig& trace_config) {
652 auto helper = GetHelper(&task_runner);
653
654 helper->StartTracing(trace_config);
655
656 ReadAndWait(helper.get());
657 return helper;
658 }
659
GetUnwindingErrors(TraceProcessorTestHelper * helper)660 std::vector<std::string> GetUnwindingErrors(
661 TraceProcessorTestHelper* helper) {
662 std::vector<std::string> out;
663 const auto& packets = helper->trace();
664 for (const protos::gen::TracePacket& packet : packets) {
665 for (const protos::gen::InternedString& fn :
666 packet.interned_data().function_names()) {
667 if (fn.str().find("ERROR ") == 0) {
668 out.push_back(fn.str());
669 }
670 }
671 }
672 return out;
673 }
674
PrintStats(TraceProcessorTestHelper * helper)675 void PrintStats(TraceProcessorTestHelper* helper) {
676 const auto& packets = helper->trace();
677 for (const protos::gen::TracePacket& packet : packets) {
678 for (const auto& dump : packet.profile_packet().process_dumps()) {
679 // protobuf uint64 does not like the PRIu64 formatter.
680 PERFETTO_LOG("Stats for %s: %s", std::to_string(dump.pid()).c_str(),
681 FormatStats(dump.stats()).c_str());
682 }
683 }
684 std::vector<std::string> errors = GetUnwindingErrors(helper);
685 for (const std::string& err : errors) {
686 PERFETTO_LOG("Unwinding error: %s", err.c_str());
687 }
688 }
689
ValidateSampleSizes(TraceProcessorTestHelper * helper,uint64_t pid,uint64_t alloc_size,const std::string & heap_name="")690 void ValidateSampleSizes(TraceProcessorTestHelper* helper,
691 uint64_t pid,
692 uint64_t alloc_size,
693 const std::string& heap_name = "") {
694 const auto& packets = helper->trace();
695 for (const protos::gen::TracePacket& packet : packets) {
696 for (const auto& dump : packet.profile_packet().process_dumps()) {
697 if (dump.pid() != pid ||
698 (!heap_name.empty() && heap_name != dump.heap_name())) {
699 continue;
700 }
701 for (const auto& sample : dump.samples()) {
702 EXPECT_EQ(sample.self_allocated() % alloc_size, 0u);
703 EXPECT_EQ(sample.self_freed() % alloc_size, 0u);
704 EXPECT_THAT(sample.self_allocated() - sample.self_freed(),
705 AnyOf(Eq(0u), Eq(alloc_size)));
706 }
707 }
708 }
709 }
710
ValidateFromStartup(TraceProcessorTestHelper * helper,uint64_t pid,bool from_startup)711 void ValidateFromStartup(TraceProcessorTestHelper* helper,
712 uint64_t pid,
713 bool from_startup) {
714 const auto& packets = helper->trace();
715 for (const protos::gen::TracePacket& packet : packets) {
716 for (const auto& dump : packet.profile_packet().process_dumps()) {
717 if (dump.pid() != pid)
718 continue;
719 EXPECT_EQ(dump.from_startup(), from_startup);
720 }
721 }
722 }
723
ValidateRejectedConcurrent(TraceProcessorTestHelper * helper,uint64_t pid,bool rejected_concurrent)724 void ValidateRejectedConcurrent(TraceProcessorTestHelper* helper,
725 uint64_t pid,
726 bool rejected_concurrent) {
727 const auto& packets = helper->trace();
728 for (const protos::gen::TracePacket& packet : packets) {
729 for (const auto& dump : packet.profile_packet().process_dumps()) {
730 if (dump.pid() != pid)
731 continue;
732 EXPECT_EQ(dump.rejected_concurrent(), rejected_concurrent);
733 }
734 }
735 }
736
ValidateNoSamples(TraceProcessorTestHelper * helper,uint64_t pid)737 void ValidateNoSamples(TraceProcessorTestHelper* helper, uint64_t pid) {
738 const auto& packets = helper->trace();
739 size_t samples = 0;
740 for (const protos::gen::TracePacket& packet : packets) {
741 for (const auto& dump : packet.profile_packet().process_dumps()) {
742 if (dump.pid() != pid)
743 continue;
744 samples += dump.samples().size();
745 }
746 }
747 EXPECT_EQ(samples, 0u);
748 }
749
ValidateHasSamples(TraceProcessorTestHelper * helper,uint64_t pid,const std::string & heap_name,uint64_t sampling_interval)750 void ValidateHasSamples(TraceProcessorTestHelper* helper,
751 uint64_t pid,
752 const std::string& heap_name,
753 uint64_t sampling_interval) {
754 const auto& packets = helper->trace();
755 ASSERT_GT(packets.size(), 0u);
756 size_t profile_packets = 0;
757 size_t samples = 0;
758 uint64_t last_allocated = 0;
759 uint64_t last_freed = 0;
760 for (const protos::gen::TracePacket& packet : packets) {
761 for (const auto& dump : packet.profile_packet().process_dumps()) {
762 if (dump.pid() != pid || dump.heap_name() != heap_name)
763 continue;
764 EXPECT_EQ(dump.sampling_interval_bytes(), sampling_interval);
765 for (const auto& sample : dump.samples()) {
766 last_allocated = sample.self_allocated();
767 last_freed = sample.self_freed();
768 samples++;
769 }
770 profile_packets++;
771 }
772 }
773 EXPECT_GT(profile_packets, 0u) << heap_name;
774 EXPECT_GT(samples, 0u) << heap_name;
775 EXPECT_GT(last_allocated, 0u) << heap_name;
776 EXPECT_GT(last_freed, 0u) << heap_name;
777 }
778
ValidateOnlyPID(TraceProcessorTestHelper * helper,uint64_t pid)779 void ValidateOnlyPID(TraceProcessorTestHelper* helper, uint64_t pid) {
780 size_t dumps = 0;
781 const auto& packets = helper->trace();
782 for (const protos::gen::TracePacket& packet : packets) {
783 for (const auto& dump : packet.profile_packet().process_dumps()) {
784 EXPECT_EQ(dump.pid(), pid);
785 dumps++;
786 }
787 }
788 EXPECT_GT(dumps, 0u);
789 }
790 };
791
792 // This checks that the child is still running (to ensure it didn't crash
793 // unxpectedly) and then kills it.
KillAssertRunning(base::Subprocess * child)794 void KillAssertRunning(base::Subprocess* child) {
795 ASSERT_EQ(child->Poll(), base::Subprocess::kRunning)
796 << "Target process not running. CHECK CRASH LOGS.";
797 PERFETTO_LOG("Shutting down profile target.");
798 child->KillAndWaitForTermination();
799 }
800
TEST_P(HeapprofdEndToEnd,Disabled)801 TEST_P(HeapprofdEndToEnd, Disabled) {
802 constexpr size_t kAllocSize = 1024;
803
804 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
805 const uint64_t pid = static_cast<uint64_t>(child.pid());
806
807 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
808 cfg->set_sampling_interval_bytes(1);
809 cfg->add_pid(pid);
810 cfg->add_heaps("invalid");
811 ContinuousDump(cfg);
812 });
813
814 auto helper = Trace(trace_config);
815 WRITE_TRACE(helper->full_trace());
816 PrintStats(helper.get());
817 KillAssertRunning(&child);
818
819 ValidateNoSamples(helper.get(), pid);
820 }
821
TEST_P(HeapprofdEndToEnd,Smoke)822 TEST_P(HeapprofdEndToEnd, Smoke) {
823 constexpr size_t kAllocSize = 1024;
824 constexpr size_t kSamplingInterval = 1;
825
826 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
827 const uint64_t pid = static_cast<uint64_t>(child.pid());
828
829 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
830 cfg->set_sampling_interval_bytes(kSamplingInterval);
831 cfg->add_pid(pid);
832 cfg->add_heaps(allocator_name());
833 ContinuousDump(cfg);
834 });
835
836 auto helper = Trace(trace_config);
837 WRITE_TRACE(helper->full_trace());
838 PrintStats(helper.get());
839 KillAssertRunning(&child);
840
841 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
842 ValidateOnlyPID(helper.get(), pid);
843 ValidateSampleSizes(helper.get(), pid, kAllocSize);
844 }
845
TEST_P(HeapprofdEndToEnd,TwoAllocators)846 TEST_P(HeapprofdEndToEnd, TwoAllocators) {
847 constexpr size_t kCustomAllocSize = 1024;
848 constexpr size_t kAllocSize = 7;
849 constexpr size_t kSamplingInterval = 1;
850
851 base::Subprocess child =
852 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
853 const uint64_t pid = static_cast<uint64_t>(child.pid());
854
855 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
856 cfg->set_sampling_interval_bytes(kSamplingInterval);
857 cfg->add_pid(pid);
858 cfg->add_heaps(allocator_name());
859 cfg->add_heaps("secondary");
860 ContinuousDump(cfg);
861 });
862
863 auto helper = Trace(trace_config);
864 WRITE_TRACE(helper->full_trace());
865 PrintStats(helper.get());
866 KillAssertRunning(&child);
867
868 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
869 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
870 ValidateOnlyPID(helper.get(), pid);
871 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
872 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
873 }
874
TEST_P(HeapprofdEndToEnd,TwoAllocatorsAll)875 TEST_P(HeapprofdEndToEnd, TwoAllocatorsAll) {
876 constexpr size_t kCustomAllocSize = 1024;
877 constexpr size_t kAllocSize = 7;
878 constexpr size_t kSamplingInterval = 1;
879
880 base::Subprocess child =
881 ForkContinuousAlloc(allocator_mode(), kAllocSize, kCustomAllocSize);
882 const uint64_t pid = static_cast<uint64_t>(child.pid());
883
884 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
885 cfg->set_sampling_interval_bytes(kSamplingInterval);
886 cfg->add_pid(pid);
887 cfg->set_all_heaps(true);
888 ContinuousDump(cfg);
889 });
890
891 auto helper = Trace(trace_config);
892 WRITE_TRACE(helper->full_trace());
893 PrintStats(helper.get());
894 KillAssertRunning(&child);
895
896 ValidateHasSamples(helper.get(), pid, "secondary", kSamplingInterval);
897 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
898 ValidateOnlyPID(helper.get(), pid);
899 ValidateSampleSizes(helper.get(), pid, kCustomAllocSize, "secondary");
900 ValidateSampleSizes(helper.get(), pid, kAllocSize, allocator_name());
901 }
902
TEST_P(HeapprofdEndToEnd,AccurateCustomReportAllocation)903 TEST_P(HeapprofdEndToEnd, AccurateCustomReportAllocation) {
904 if (allocator_mode() != AllocatorMode::kCustom)
905 GTEST_SKIP();
906
907 base::Subprocess child({"/proc/self/exe"});
908 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
909 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
910 StartAndWaitForHandshake(&child);
911
912 const uint64_t pid = static_cast<uint64_t>(child.pid());
913
914 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
915 cfg->set_sampling_interval_bytes(1);
916 cfg->add_pid(pid);
917 cfg->add_heaps("test");
918 });
919
920 auto helper = Trace(trace_config);
921 WRITE_TRACE(helper->full_trace());
922 PrintStats(helper.get());
923 KillAssertRunning(&child);
924
925 auto flamegraph = GetFlamegraph(&helper->tp());
926 EXPECT_THAT(flamegraph,
927 Contains(AllOf(
928 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
929 Field(&FlamegraphNode::cumulative_size, Eq(15)),
930 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
931
932 ValidateOnlyPID(helper.get(), pid);
933
934 size_t total_alloc = 0;
935 size_t total_freed = 0;
936 for (const protos::gen::TracePacket& packet : helper->trace()) {
937 for (const auto& dump : packet.profile_packet().process_dumps()) {
938 for (const auto& sample : dump.samples()) {
939 total_alloc += sample.self_allocated();
940 total_freed += sample.self_freed();
941 }
942 }
943 }
944 EXPECT_EQ(total_alloc, 40u);
945 EXPECT_EQ(total_freed, 25u);
946 }
947
948 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
949 #define MAYBE_AccurateCustomReportAllocationWithVfork \
950 AccurateCustomReportAllocationWithVfork
951 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
952 AccurateCustomReportAllocationWithVforkThread
953 #else
954 #define MAYBE_AccurateCustomReportAllocationWithVfork \
955 DISABLED_AccurateCustomReportAllocationWithVfork
956 #define MAYBE_AccurateCustomReportAllocationWithVforkThread \
957 DISABLED_AccurateCustomReportAllocationWithVforkThread
958 #endif
959
TEST_P(HeapprofdEndToEnd,MAYBE_AccurateCustomReportAllocationWithVfork)960 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVfork) {
961 if (allocator_mode() != AllocatorMode::kCustom)
962 GTEST_SKIP();
963
964 base::Subprocess child({"/proc/self/exe"});
965 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
966 child.args.env.push_back(
967 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK=1");
968 StartAndWaitForHandshake(&child);
969
970 const uint64_t pid = static_cast<uint64_t>(child.pid());
971
972 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
973 cfg->set_sampling_interval_bytes(1);
974 cfg->add_pid(pid);
975 cfg->add_heaps("test");
976 });
977
978 auto helper = Trace(trace_config);
979 WRITE_TRACE(helper->full_trace());
980 PrintStats(helper.get());
981 KillAssertRunning(&child);
982
983 auto flamegraph = GetFlamegraph(&helper->tp());
984 EXPECT_THAT(flamegraph,
985 Contains(AllOf(
986 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
987 Field(&FlamegraphNode::cumulative_size, Eq(15)),
988 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
989
990 ValidateOnlyPID(helper.get(), pid);
991
992 size_t total_alloc = 0;
993 size_t total_freed = 0;
994 for (const protos::gen::TracePacket& packet : helper->trace()) {
995 for (const auto& dump : packet.profile_packet().process_dumps()) {
996 EXPECT_FALSE(dump.disconnected());
997 for (const auto& sample : dump.samples()) {
998 total_alloc += sample.self_allocated();
999 total_freed += sample.self_freed();
1000 }
1001 }
1002 }
1003 EXPECT_EQ(total_alloc, 40u);
1004 EXPECT_EQ(total_freed, 25u);
1005 }
1006
TEST_P(HeapprofdEndToEnd,MAYBE_AccurateCustomReportAllocationWithVforkThread)1007 TEST_P(HeapprofdEndToEnd, MAYBE_AccurateCustomReportAllocationWithVforkThread) {
1008 if (allocator_mode() != AllocatorMode::kCustom)
1009 GTEST_SKIP();
1010
1011 base::Subprocess child({"/proc/self/exe"});
1012 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1013 child.args.env.push_back(
1014 "HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC_WITH_VFORK_THREAD=1");
1015 StartAndWaitForHandshake(&child);
1016
1017 const uint64_t pid = static_cast<uint64_t>(child.pid());
1018
1019 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1020 cfg->set_sampling_interval_bytes(1);
1021 cfg->add_pid(pid);
1022 cfg->add_heaps("test");
1023 });
1024
1025 auto helper = Trace(trace_config);
1026 WRITE_TRACE(helper->full_trace());
1027 PrintStats(helper.get());
1028 KillAssertRunning(&child);
1029
1030 auto flamegraph = GetFlamegraph(&helper->tp());
1031 EXPECT_THAT(flamegraph,
1032 Contains(AllOf(
1033 Field(&FlamegraphNode::name, HasSubstr("RunAccurateMalloc")),
1034 Field(&FlamegraphNode::cumulative_size, Eq(15)),
1035 Field(&FlamegraphNode::cumulative_alloc_size, Eq(40)))));
1036
1037 ValidateOnlyPID(helper.get(), pid);
1038
1039 size_t total_alloc = 0;
1040 size_t total_freed = 0;
1041 for (const protos::gen::TracePacket& packet : helper->trace()) {
1042 for (const auto& dump : packet.profile_packet().process_dumps()) {
1043 EXPECT_FALSE(dump.disconnected());
1044 for (const auto& sample : dump.samples()) {
1045 total_alloc += sample.self_allocated();
1046 total_freed += sample.self_freed();
1047 }
1048 }
1049 }
1050 EXPECT_EQ(total_alloc, 40u);
1051 EXPECT_EQ(total_freed, 25u);
1052 }
1053
TEST_P(HeapprofdEndToEnd,AccurateCustomReportSample)1054 TEST_P(HeapprofdEndToEnd, AccurateCustomReportSample) {
1055 if (allocator_mode() != AllocatorMode::kCustom)
1056 GTEST_SKIP();
1057
1058 base::Subprocess child({"/proc/self/exe"});
1059 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1060 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_SAMPLE=1");
1061 StartAndWaitForHandshake(&child);
1062
1063 const uint64_t pid = static_cast<uint64_t>(child.pid());
1064
1065 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1066 cfg->set_sampling_interval_bytes(1000000);
1067 cfg->add_pid(pid);
1068 cfg->add_heaps("test");
1069 });
1070
1071 auto helper = Trace(trace_config);
1072 WRITE_TRACE(helper->full_trace());
1073 PrintStats(helper.get());
1074 KillAssertRunning(&child);
1075
1076 ValidateOnlyPID(helper.get(), pid);
1077
1078 size_t total_alloc = 0;
1079 size_t total_freed = 0;
1080 for (const protos::gen::TracePacket& packet : helper->trace()) {
1081 for (const auto& dump : packet.profile_packet().process_dumps()) {
1082 for (const auto& sample : dump.samples()) {
1083 total_alloc += sample.self_allocated();
1084 total_freed += sample.self_freed();
1085 }
1086 }
1087 }
1088 EXPECT_EQ(total_alloc, 40u);
1089 EXPECT_EQ(total_freed, 25u);
1090 }
1091
TEST_P(HeapprofdEndToEnd,AccurateDumpAtMaxCustom)1092 TEST_P(HeapprofdEndToEnd, AccurateDumpAtMaxCustom) {
1093 if (allocator_mode() != AllocatorMode::kCustom)
1094 GTEST_SKIP();
1095
1096 base::Subprocess child({"/proc/self/exe"});
1097 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1098 child.args.env.push_back("HEAPPROFD_TESTING_RUN_ACCURATE_MALLOC=1");
1099 StartAndWaitForHandshake(&child);
1100
1101 const uint64_t pid = static_cast<uint64_t>(child.pid());
1102
1103 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1104 cfg->set_sampling_interval_bytes(1);
1105 cfg->add_pid(pid);
1106 cfg->add_heaps("test");
1107 cfg->set_dump_at_max(true);
1108 });
1109
1110 auto helper = Trace(trace_config);
1111 WRITE_TRACE(helper->full_trace());
1112 PrintStats(helper.get());
1113 KillAssertRunning(&child);
1114
1115 ValidateOnlyPID(helper.get(), pid);
1116
1117 size_t total_alloc = 0;
1118 size_t total_count = 0;
1119 for (const protos::gen::TracePacket& packet : helper->trace()) {
1120 for (const auto& dump : packet.profile_packet().process_dumps()) {
1121 for (const auto& sample : dump.samples()) {
1122 total_alloc += sample.self_max();
1123 total_count += sample.self_max_count();
1124 }
1125 }
1126 }
1127 EXPECT_EQ(total_alloc, 30u);
1128 EXPECT_EQ(total_count, 2u);
1129 }
1130
TEST_P(HeapprofdEndToEnd,CustomLifetime)1131 TEST_P(HeapprofdEndToEnd, CustomLifetime) {
1132 if (allocator_mode() != AllocatorMode::kCustom)
1133 GTEST_SKIP();
1134
1135 int disabled_pipe[2];
1136 PERFETTO_CHECK(pipe(disabled_pipe) == 0); // NOLINT(android-cloexec-pipe)
1137
1138 int disabled_pipe_rd = disabled_pipe[0];
1139 int disabled_pipe_wr = disabled_pipe[1];
1140
1141 base::Subprocess child({"/proc/self/exe"});
1142 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1143 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG0=1000000");
1144 child.args.env.push_back("HEAPPROFD_TESTING_RUN_LIFETIME_ARG1=" +
1145 std::to_string(disabled_pipe_wr));
1146 child.args.preserve_fds.push_back(disabled_pipe_wr);
1147 StartAndWaitForHandshake(&child);
1148 close(disabled_pipe_wr);
1149
1150 const uint64_t pid = static_cast<uint64_t>(child.pid());
1151
1152 TraceConfig trace_config = MakeTraceConfig([pid](HeapprofdConfig* cfg) {
1153 cfg->set_sampling_interval_bytes(1000000);
1154 cfg->add_pid(pid);
1155 cfg->add_heaps("test");
1156 cfg->add_heaps("othertest");
1157 });
1158
1159 auto helper = Trace(trace_config);
1160 WRITE_TRACE(helper->full_trace());
1161 PrintStats(helper.get());
1162 // Give client some time to notice the disconnect.
1163 sleep(2);
1164 KillAssertRunning(&child);
1165
1166 char x;
1167 EXPECT_EQ(base::Read(disabled_pipe_rd, &x, sizeof(x)), 1);
1168 close(disabled_pipe_rd);
1169 }
1170
TEST_P(HeapprofdEndToEnd,TwoProcesses)1171 TEST_P(HeapprofdEndToEnd, TwoProcesses) {
1172 constexpr size_t kAllocSize = 1024;
1173 constexpr size_t kAllocSize2 = 7;
1174 constexpr size_t kSamplingInterval = 1;
1175
1176 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1177 base::Subprocess child2 = ForkContinuousAlloc(allocator_mode(), kAllocSize2);
1178 const uint64_t pid = static_cast<uint64_t>(child.pid());
1179 const auto pid2 = child2.pid();
1180
1181 TraceConfig trace_config =
1182 MakeTraceConfig([this, pid, pid2](HeapprofdConfig* cfg) {
1183 cfg->set_sampling_interval_bytes(kSamplingInterval);
1184 cfg->add_pid(pid);
1185 cfg->add_pid(static_cast<uint64_t>(pid2));
1186 cfg->add_heaps(allocator_name());
1187 });
1188
1189 auto helper = Trace(trace_config);
1190 WRITE_TRACE(helper->full_trace());
1191 PrintStats(helper.get());
1192
1193 KillAssertRunning(&child);
1194 KillAssertRunning(&child2);
1195
1196 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1197 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1198 ValidateHasSamples(helper.get(), static_cast<uint64_t>(pid2),
1199 allocator_name(), kSamplingInterval);
1200 ValidateSampleSizes(helper.get(), static_cast<uint64_t>(pid2), kAllocSize2);
1201 }
1202
TEST_P(HeapprofdEndToEnd,FinalFlush)1203 TEST_P(HeapprofdEndToEnd, FinalFlush) {
1204 constexpr size_t kAllocSize = 1024;
1205 constexpr size_t kSamplingInterval = 1;
1206
1207 base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
1208 const uint64_t pid = static_cast<uint64_t>(child.pid());
1209 TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
1210 cfg->set_sampling_interval_bytes(kSamplingInterval);
1211 cfg->add_pid(pid);
1212 cfg->add_heaps(allocator_name());
1213 });
1214
1215 auto helper = Trace(trace_config);
1216 WRITE_TRACE(helper->full_trace());
1217 PrintStats(helper.get());
1218 KillAssertRunning(&child);
1219
1220 ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
1221 ValidateOnlyPID(helper.get(), pid);
1222 ValidateSampleSizes(helper.get(), pid, kAllocSize);
1223 }
1224
TEST_P(HeapprofdEndToEnd,NativeStartup)1225 TEST_P(HeapprofdEndToEnd, NativeStartup) {
1226 if (test_mode() == TestMode::kStatic)
1227 GTEST_SKIP();
1228
1229 auto helper = GetHelper(&task_runner);
1230
1231 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1232 cfg->set_sampling_interval_bytes(1);
1233 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1234 cfg->add_heaps(allocator_name());
1235 });
1236 trace_config.set_duration_ms(5000);
1237
1238 helper->StartTracing(trace_config);
1239
1240 // Wait to guarantee that the process forked below is hooked by the profiler
1241 // by virtue of the startup check, and not by virtue of being seen as a
1242 // running process. This sleep is here to prevent that, accidentally, the
1243 // test gets to the fork()+exec() too soon, before the heap profiling daemon
1244 // has received the trace config.
1245 sleep(1);
1246
1247 base::Subprocess child({"/proc/self/exe"});
1248 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1249 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1250 allocator_name());
1251 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1252 std::to_string(kStartupAllocSize));
1253 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1254 std::string("0"));
1255 StartAndWaitForHandshake(&child);
1256
1257 ReadAndWait(helper.get());
1258 WRITE_TRACE(helper->full_trace());
1259
1260 KillAssertRunning(&child);
1261
1262 const auto& packets = helper->trace();
1263 ASSERT_GT(packets.size(), 0u);
1264 size_t profile_packets = 0;
1265 size_t samples = 0;
1266 uint64_t total_allocated = 0;
1267 uint64_t total_freed = 0;
1268 for (const protos::gen::TracePacket& packet : packets) {
1269 if (packet.has_profile_packet() &&
1270 !packet.profile_packet().process_dumps().empty()) {
1271 const auto& dumps = packet.profile_packet().process_dumps();
1272 ASSERT_EQ(dumps.size(), 1u);
1273 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1274 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1275 profile_packets++;
1276 for (const auto& sample : dump.samples()) {
1277 samples++;
1278 total_allocated += sample.self_allocated();
1279 total_freed += sample.self_freed();
1280 }
1281 }
1282 }
1283 EXPECT_EQ(profile_packets, 1u);
1284 EXPECT_GT(samples, 0u);
1285 EXPECT_GT(total_allocated, 0u);
1286 EXPECT_GT(total_freed, 0u);
1287 }
1288
TEST_P(HeapprofdEndToEnd,NativeStartupDenormalizedCmdline)1289 TEST_P(HeapprofdEndToEnd, NativeStartupDenormalizedCmdline) {
1290 if (test_mode() == TestMode::kStatic)
1291 GTEST_SKIP();
1292
1293 auto helper = GetHelper(&task_runner);
1294
1295 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1296 cfg->set_sampling_interval_bytes(1);
1297 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1298 cfg->add_heaps(allocator_name());
1299 });
1300 trace_config.set_duration_ms(5000);
1301
1302 helper->StartTracing(trace_config);
1303
1304 // Wait to guarantee that the process forked below is hooked by the profiler
1305 // by virtue of the startup check, and not by virtue of being seen as a
1306 // running process. This sleep is here to prevent that, accidentally, the
1307 // test gets to the fork()+exec() too soon, before the heap profiling daemon
1308 // has received the trace config.
1309 sleep(1);
1310
1311 base::Subprocess child({"/proc/self/exe"});
1312 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1313 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1314 allocator_name());
1315 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1316 std::to_string(kStartupAllocSize));
1317 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1318 std::string("0"));
1319
1320 StartAndWaitForHandshake(&child);
1321
1322 ReadAndWait(helper.get());
1323 WRITE_TRACE(helper->full_trace());
1324
1325 KillAssertRunning(&child);
1326
1327 const auto& packets = helper->trace();
1328 ASSERT_GT(packets.size(), 0u);
1329 size_t profile_packets = 0;
1330 size_t samples = 0;
1331 uint64_t total_allocated = 0;
1332 uint64_t total_freed = 0;
1333 for (const protos::gen::TracePacket& packet : packets) {
1334 if (packet.has_profile_packet() &&
1335 !packet.profile_packet().process_dumps().empty()) {
1336 const auto& dumps = packet.profile_packet().process_dumps();
1337 ASSERT_EQ(dumps.size(), 1u);
1338 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1339 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1340 profile_packets++;
1341 for (const auto& sample : dump.samples()) {
1342 samples++;
1343 total_allocated += sample.self_allocated();
1344 total_freed += sample.self_freed();
1345 }
1346 }
1347 }
1348 EXPECT_EQ(profile_packets, 1u);
1349 EXPECT_GT(samples, 0u);
1350 EXPECT_GT(total_allocated, 0u);
1351 EXPECT_GT(total_freed, 0u);
1352 }
1353
TEST_P(HeapprofdEndToEnd,DiscoverByName)1354 TEST_P(HeapprofdEndToEnd, DiscoverByName) {
1355 auto helper = GetHelper(&task_runner);
1356
1357 base::Subprocess child({"/proc/self/exe"});
1358 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1359 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1360 allocator_name());
1361 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1362 std::to_string(kStartupAllocSize));
1363 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1364 std::string("0"));
1365
1366 StartAndWaitForHandshake(&child);
1367
1368 // Wait to make sure process is fully initialized, so we do not accidentally
1369 // match it by the startup logic.
1370 sleep(1);
1371
1372 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1373 cfg->set_sampling_interval_bytes(1);
1374 cfg->add_process_cmdline("heapprofd_continuous_malloc");
1375 cfg->add_heaps(allocator_name());
1376 });
1377 trace_config.set_duration_ms(5000);
1378
1379 helper->StartTracing(trace_config);
1380 ReadAndWait(helper.get());
1381 WRITE_TRACE(helper->full_trace());
1382
1383 KillAssertRunning(&child);
1384
1385 const auto& packets = helper->trace();
1386 ASSERT_GT(packets.size(), 0u);
1387 size_t profile_packets = 0;
1388 size_t samples = 0;
1389 uint64_t total_allocated = 0;
1390 uint64_t total_freed = 0;
1391 for (const protos::gen::TracePacket& packet : packets) {
1392 if (packet.has_profile_packet() &&
1393 !packet.profile_packet().process_dumps().empty()) {
1394 const auto& dumps = packet.profile_packet().process_dumps();
1395 ASSERT_EQ(dumps.size(), 1u);
1396 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1397 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1398 profile_packets++;
1399 for (const auto& sample : dump.samples()) {
1400 samples++;
1401 total_allocated += sample.self_allocated();
1402 total_freed += sample.self_freed();
1403 }
1404 }
1405 }
1406 EXPECT_EQ(profile_packets, 1u);
1407 EXPECT_GT(samples, 0u);
1408 EXPECT_GT(total_allocated, 0u);
1409 EXPECT_GT(total_freed, 0u);
1410 }
1411
TEST_P(HeapprofdEndToEnd,DiscoverByNameDenormalizedCmdline)1412 TEST_P(HeapprofdEndToEnd, DiscoverByNameDenormalizedCmdline) {
1413 auto helper = GetHelper(&task_runner);
1414
1415 // Make sure the forked process does not get reparented to init.
1416 base::Subprocess child({"/proc/self/exe"});
1417 child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
1418 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
1419 allocator_name());
1420 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
1421 std::to_string(kStartupAllocSize));
1422 child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
1423 std::string("0"));
1424
1425 StartAndWaitForHandshake(&child);
1426
1427 // Wait to make sure process is fully initialized, so we do not accidentally
1428 // match it by the startup logic.
1429 sleep(1);
1430
1431 TraceConfig trace_config = MakeTraceConfig([this](HeapprofdConfig* cfg) {
1432 cfg->set_sampling_interval_bytes(1);
1433 cfg->add_process_cmdline("heapprofd_continuous_malloc@1.2.3");
1434 cfg->add_heaps(allocator_name());
1435 });
1436 trace_config.set_duration_ms(5000);
1437
1438 helper->StartTracing(trace_config);
1439 ReadAndWait(helper.get());
1440 WRITE_TRACE(helper->full_trace());
1441
1442 KillAssertRunning(&child);
1443
1444 const auto& packets = helper->trace();
1445 ASSERT_GT(packets.size(), 0u);
1446 size_t profile_packets = 0;
1447 size_t samples = 0;
1448 uint64_t total_allocated = 0;
1449 uint64_t total_freed = 0;
1450 for (const protos::gen::TracePacket& packet : packets) {
1451 if (packet.has_profile_packet() &&
1452 !packet.profile_packet().process_dumps().empty()) {
1453 const auto& dumps = packet.profile_packet().process_dumps();
1454 ASSERT_EQ(dumps.size(), 1u);
1455 const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
1456 EXPECT_EQ(static_cast<pid_t>(dump.pid()), child.pid());
1457 profile_packets++;
1458 for (const auto& sample : dump.samples()) {
1459 samples++;
1460 total_allocated += sample.self_allocated();
1461 total_freed += sample.self_freed();
1462 }
1463 }
1464 }
1465 EXPECT_EQ(profile_packets, 1u);
1466 EXPECT_GT(samples, 0u);
1467 EXPECT_GT(total_allocated, 0u);
1468 EXPECT_GT(total_freed, 0u);
1469 }
1470
TEST_P(HeapprofdEndToEnd, ReInit) {
  constexpr size_t kSamplingInterval = 1;

  // We cannot use base::Pipe because it assumes we want CLOEXEC.
  // We do NOT want CLOEXEC, as these fds are used by RunReInit in the child.
  int signal_pipe[2];
  int ack_pipe[2];

  PERFETTO_CHECK(pipe(signal_pipe) == 0);  // NOLINT(android-cloexec-pipe)
  PERFETTO_CHECK(pipe(ack_pipe) == 0);     // NOLINT(android-cloexec-pipe)

  int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
  cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);

  int signal_pipe_rd = signal_pipe[0];
  int signal_pipe_wr = signal_pipe[1];
  int ack_pipe_rd = ack_pipe[0];
  int ack_pipe_wr = ack_pipe[1];

  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.preserve_fds.push_back(signal_pipe_rd);
  child.args.preserve_fds.push_back(ack_pipe_wr);
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
                           std::to_string(signal_pipe_rd));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
                           std::to_string(ack_pipe_wr));
  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());

  close(signal_pipe_rd);
  close(ack_pipe_wr);

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });

  auto helper = Trace(trace_config);
  WRITE_TRACE(helper->full_trace());

  PrintStats(helper.get());
  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);

  PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
  close(signal_pipe_wr);
  char buf[1];
  ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
  close(ack_pipe_rd);

  // A brief sleep to allow the client to notice that the profiling session is
  // to be torn down (as it rejects concurrent sessions).
  usleep(500 * kMsToUs);

  PERFETTO_LOG("HeapprofdEndToEnd::ReInit: Starting second");

  // We must keep the original helper alive because it owns the service thread.
  std::unique_ptr<TraceProcessorTestHelper> helper2 =
      std::unique_ptr<TraceProcessorTestHelper>(
          new TraceProcessorTestHelper(&task_runner));

  helper2->ConnectConsumer();
  helper2->WaitForConsumerConnect();
  helper2->StartTracing(trace_config);
  ReadAndWait(helper2.get());
  WRITE_TRACE(helper2->trace());

  PrintStats(helper2.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper2.get(), pid);
  ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
}

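// Same signal/ack handshake and two-phase flow as ReInit above; the child-side
// behaviour is driven by RunReInit via the HEAPPROFD_TESTING_RUN_REINIT_*
// environment variables.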
TEST_P(HeapprofdEndToEnd, ReInitAfterInvalid) {
  constexpr size_t kSamplingInterval = 1;

  // We cannot use base::Pipe because it assumes we want CLOEXEC.
  // We do NOT want CLOEXEC, as these fds are used by RunReInit in the child.
  int signal_pipe[2];
  int ack_pipe[2];

  PERFETTO_CHECK(pipe(signal_pipe) == 0);  // NOLINT(android-cloexec-pipe)
  PERFETTO_CHECK(pipe(ack_pipe) == 0);     // NOLINT(android-cloexec-pipe)

  int cur_flags = fcntl(signal_pipe[0], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[0], F_SETFL, cur_flags | O_NONBLOCK) == 0);
  cur_flags = fcntl(signal_pipe[1], F_GETFL, 0);
  PERFETTO_CHECK(cur_flags >= 0);
  PERFETTO_CHECK(fcntl(signal_pipe[1], F_SETFL, cur_flags | O_NONBLOCK) == 0);

  int signal_pipe_rd = signal_pipe[0];
  int signal_pipe_wr = signal_pipe[1];
  int ack_pipe_rd = ack_pipe[0];
  int ack_pipe_wr = ack_pipe[1];

  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.preserve_fds.push_back(signal_pipe_rd);
  child.args.preserve_fds.push_back(ack_pipe_wr);
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG1=" +
                           std::to_string(signal_pipe_rd));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_REINIT_ARG2=" +
                           std::to_string(ack_pipe_wr));
  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());

  close(signal_pipe_rd);
  close(ack_pipe_wr);

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });

  auto helper = Trace(trace_config);
  WRITE_TRACE(helper->full_trace());

  PrintStats(helper.get());
  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kFirstIterationBytes);

  PERFETTO_CHECK(PERFETTO_EINTR(write(signal_pipe_wr, "1", 1)) == 1);
  close(signal_pipe_wr);
  char buf[1];
  ASSERT_EQ(PERFETTO_EINTR(read(ack_pipe_rd, buf, sizeof(buf))), 1);
  close(ack_pipe_rd);

  // A brief sleep to allow the client to notice that the profiling session is
  // to be torn down (as it rejects concurrent sessions).
  usleep(500 * kMsToUs);

  PERFETTO_LOG("HeapprofdEndToEnd::ReInitAfterInvalid: Starting second");

  // We must keep the original helper alive because it owns the service thread.
  std::unique_ptr<TraceProcessorTestHelper> helper2 =
      std::unique_ptr<TraceProcessorTestHelper>(
          new TraceProcessorTestHelper(&task_runner));

  helper2->ConnectConsumer();
  helper2->WaitForConsumerConnect();
  helper2->StartTracing(trace_config);
  ReadAndWait(helper2.get());

  WRITE_TRACE(helper2->trace());

  PrintStats(helper2.get());
  KillAssertRunning(&child);

  ValidateHasSamples(helper2.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper2.get(), pid);
  ValidateSampleSizes(helper2.get(), pid, kSecondIterationBytes);
}

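// Starts a second tracing session targeting the same process while the first
// session is still active. heapprofd rejects concurrent sessions, so only the
// first session is expected to contain samples.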
TEST_P(HeapprofdEndToEnd, ConcurrentSession) {
  constexpr size_t kAllocSize = 1024;
  constexpr size_t kSamplingInterval = 1;

  base::Subprocess child = ForkContinuousAlloc(allocator_mode(), kAllocSize);
  const uint64_t pid = static_cast<uint64_t>(child.pid());

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(kSamplingInterval);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
    ContinuousDump(cfg);
  });
  trace_config.set_duration_ms(5000);

  auto helper = GetHelper(&task_runner);
  helper->StartTracing(trace_config);
  sleep(1);

  PERFETTO_LOG("Starting concurrent.");
  std::unique_ptr<TraceProcessorTestHelper> helper_concurrent(
      new TraceProcessorTestHelper(&task_runner));
  helper_concurrent->ConnectConsumer();
  helper_concurrent->WaitForConsumerConnect();
  helper_concurrent->StartTracing(trace_config);

  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());
  PrintStats(helper.get());

  ReadAndWait(helper_concurrent.get());
  WRITE_TRACE(helper_concurrent->trace());
  PrintStats(helper_concurrent.get());
  KillAssertRunning(&child);

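  // The first session should have profiled the child normally; the concurrent
  // session should only have recorded that it was rejected for this pid.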
  ValidateHasSamples(helper.get(), pid, allocator_name(), kSamplingInterval);
  ValidateOnlyPID(helper.get(), pid);
  ValidateSampleSizes(helper.get(), pid, kAllocSize);
  ValidateRejectedConcurrent(helper.get(), pid, false);

  ValidateOnlyPID(helper_concurrent.get(), pid);
  ValidateRejectedConcurrent(helper_concurrent.get(), pid, true);
}

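// The child exits on its own while the profiling session is still active. The
// test asserts that the child exits cleanly and that the samples collected
// before the exit are still reported.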
TEST_P(HeapprofdEndToEnd, NativeProfilingActiveAtProcessExit) {
  constexpr uint64_t kTestAllocSize = 128;
  base::Pipe start_pipe = base::Pipe::Create(base::Pipe::kBothBlock);
  int start_pipe_wr = *start_pipe.wr;

  base::Subprocess child({"/proc/self/exe"});
  child.args.posix_argv0_override_for_testing = "heapprofd_continuous_malloc";
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG0=" +
                           allocator_name());
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG1=" +
                           std::to_string(kTestAllocSize));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG2=" +
                           std::to_string(0));
  child.args.env.push_back("HEAPPROFD_TESTING_RUN_MALLOC_ARG3=" +
                           std::to_string(200));
  child.args.preserve_fds.push_back(start_pipe_wr);
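  // Runs in the forked child and signals the parent through start_pipe once
  // the child has started executing.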
  child.args.posix_entrypoint_for_testing = [start_pipe_wr] {
    PERFETTO_CHECK(PERFETTO_EINTR(write(start_pipe_wr, "1", 1)) == 1);
    PERFETTO_CHECK(close(start_pipe_wr) == 0 || errno == EINTR);
  };

  StartAndWaitForHandshake(&child);

  const uint64_t pid = static_cast<uint64_t>(child.pid());
  start_pipe.wr.reset();

  // Construct tracing config (without starting profiling).
  auto helper = GetHelper(&task_runner);

  // Wait for child to have been scheduled at least once.
  char buf[1] = {};
  ASSERT_EQ(PERFETTO_EINTR(read(*start_pipe.rd, buf, sizeof(buf))), 1);
  start_pipe.rd.reset();

  TraceConfig trace_config = MakeTraceConfig([this, pid](HeapprofdConfig* cfg) {
    cfg->set_sampling_interval_bytes(1);
    cfg->add_pid(pid);
    cfg->add_heaps(allocator_name());
  });
  trace_config.set_duration_ms(5000);

  // Trace until child exits.
  helper->StartTracing(trace_config);

  // Wait for the child and assert that it exited successfully.
  EXPECT_TRUE(child.Wait(30000));
  EXPECT_EQ(child.status(), base::Subprocess::kTerminated);
  EXPECT_EQ(child.returncode(), 0);

  // Assert that we did profile the process.
  helper->FlushAndWait(2000);
  helper->DisableTracing();
  ReadAndWait(helper.get());
  WRITE_TRACE(helper->full_trace());

  const auto& packets = helper->trace();
  ASSERT_GT(packets.size(), 0u);
  size_t profile_packets = 0;
  size_t samples = 0;
  uint64_t total_allocated = 0;
  for (const protos::gen::TracePacket& packet : packets) {
    if (packet.has_profile_packet() &&
        !packet.profile_packet().process_dumps().empty()) {
      const auto& dumps = packet.profile_packet().process_dumps();
      ASSERT_EQ(dumps.size(), 1u);
      const protos::gen::ProfilePacket_ProcessHeapSamples& dump = dumps[0];
      EXPECT_EQ(dump.pid(), pid);
      profile_packets++;
      for (const auto& sample : dump.samples()) {
        samples++;
        total_allocated += sample.self_allocated();
      }
    }
  }
  EXPECT_EQ(profile_packets, 1u);
  EXPECT_GT(samples, 0u);
  EXPECT_GT(total_allocated, 0u);
}

// On in-tree Android, we use the system heapprofd in fork or central mode.
// For Linux and out-of-tree Android, we statically include a copy of
// heapprofd and use that. The statically included copy does not support
// intercepting malloc.
#if !PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
#if !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
#error "Need to start daemons for Linux test."
#endif

INSTANTIATE_TEST_SUITE_P(Run,
                         HeapprofdEndToEnd,
                         Values(std::make_tuple(TestMode::kStatic,
                                                AllocatorMode::kCustom)),
                         TestSuffix);
#elif !PERFETTO_BUILDFLAG(PERFETTO_START_DAEMONS)
INSTANTIATE_TEST_SUITE_P(
    Run,
    HeapprofdEndToEnd,
    Values(std::make_tuple(TestMode::kCentral, AllocatorMode::kMalloc),
           std::make_tuple(TestMode::kCentral, AllocatorMode::kCustom)),
    TestSuffix);
#endif

}  // namespace
}  // namespace profiling
}  // namespace perfetto
