/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/profiling/memory/unwinding.h"

#include <sys/types.h>
#include <unistd.h>

#include <unwindstack/MachineArm.h>
#include <unwindstack/MachineArm64.h>
#include <unwindstack/MachineMips.h>
#include <unwindstack/MachineMips64.h>
#include <unwindstack/MachineRiscv64.h>
#include <unwindstack/MachineX86.h>
#include <unwindstack/MachineX86_64.h>
#include <unwindstack/Maps.h>
#include <unwindstack/Memory.h>
#include <unwindstack/Regs.h>
#include <unwindstack/RegsArm.h>
#include <unwindstack/RegsArm64.h>
#include <unwindstack/RegsMips.h>
#include <unwindstack/RegsMips64.h>
#include <unwindstack/RegsRiscv64.h>
#include <unwindstack/RegsX86.h>
#include <unwindstack/RegsX86_64.h>
#include <unwindstack/Unwinder.h>
#include <unwindstack/UserArm.h>
#include <unwindstack/UserArm64.h>
#include <unwindstack/UserMips.h>
#include <unwindstack/UserMips64.h>
#include <unwindstack/UserRiscv64.h>
#include <unwindstack/UserX86.h>
#include <unwindstack/UserX86_64.h>

#include <procinfo/process_map.h>

#include "perfetto/base/logging.h"
#include "perfetto/base/task_runner.h"
#include "perfetto/ext/base/file_utils.h"
#include "perfetto/ext/base/scoped_file.h"
#include "perfetto/ext/base/string_utils.h"
#include "perfetto/ext/base/thread_task_runner.h"

#include "src/profiling/memory/unwound_messages.h"
#include "src/profiling/memory/wire_protocol.h"

namespace perfetto {
namespace profiling {
namespace {

constexpr base::TimeMillis kMapsReparseInterval{500};
constexpr uint32_t kRetryDelayMs = 100;

constexpr size_t kMaxFrames = 500;

// We assume average ~300us per unwind. If we handle up to 1000 unwinds, this
// makes sure other tasks get to be run at least every 300ms if the unwinding
// saturates this thread.
constexpr size_t kUnwindBatchSize = 1000;
constexpr size_t kRecordBatchSize = 1024;
constexpr size_t kMaxAllocRecordArenaSize = 2 * kRecordBatchSize;

#pragma GCC diagnostic push
// We do not care about deterministic destructor order.
#pragma GCC diagnostic ignored "-Wglobal-constructors"
#pragma GCC diagnostic ignored "-Wexit-time-destructors"
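// Map names whose frames are skipped at the start of each unwind, so that the
// heapprofd client library injected into the target process does not show up
// in the recorded callstacks.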
static std::vector<std::string> kSkipMaps{"heapprofd_client.so",
                                          "heapprofd_client_api.so"};
#pragma GCC diagnostic pop

size_t GetRegsSize(unwindstack::Regs* regs) {
  if (regs->Is32Bit())
    return sizeof(uint32_t) * regs->total_regs();
  return sizeof(uint64_t) * regs->total_regs();
}

void ReadFromRawData(unwindstack::Regs* regs, void* raw_data) {
  memcpy(regs->RawData(), raw_data, GetRegsSize(regs));
}

}  // namespace

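// Builds an architecture-specific unwindstack::Regs object and populates it
// from the raw register bytes that the client sent alongside the sample.
// Returns nullptr for an unknown architecture.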
std::unique_ptr<unwindstack::Regs> CreateRegsFromRawData(
    unwindstack::ArchEnum arch,
    void* raw_data) {
  std::unique_ptr<unwindstack::Regs> ret;
  switch (arch) {
    case unwindstack::ARCH_X86:
      ret.reset(new unwindstack::RegsX86());
      break;
    case unwindstack::ARCH_X86_64:
      ret.reset(new unwindstack::RegsX86_64());
      break;
    case unwindstack::ARCH_ARM:
      ret.reset(new unwindstack::RegsArm());
      break;
    case unwindstack::ARCH_ARM64:
      ret.reset(new unwindstack::RegsArm64());
      break;
    case unwindstack::ARCH_MIPS:
      ret.reset(new unwindstack::RegsMips());
      break;
    case unwindstack::ARCH_MIPS64:
      ret.reset(new unwindstack::RegsMips64());
      break;
    case unwindstack::ARCH_RISCV64:
      ret.reset(new unwindstack::RegsRiscv64());
      break;
    case unwindstack::ARCH_UNKNOWN:
      break;
  }
  if (ret)
    ReadFromRawData(ret.get(), raw_data);
  return ret;
}

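// Unwinds the callstack captured in |msg| against the target process's memory
// and maps, filling |out| with the resulting frames and build ids. On failure
// a synthetic error frame is appended and out->error is set; the return value
// is false only if the registers could not be reconstructed.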
bool DoUnwind(WireMessage* msg, UnwindingMetadata* metadata, AllocRecord* out) {
  AllocMetadata* alloc_metadata = msg->alloc_header;
  std::unique_ptr<unwindstack::Regs> regs(CreateRegsFromRawData(
      alloc_metadata->arch, alloc_metadata->register_data));
  if (regs == nullptr) {
    PERFETTO_DLOG("Unable to construct unwindstack::Regs");
    unwindstack::FrameData frame_data{};
    frame_data.function_name = "ERROR READING REGISTERS";

    out->frames.clear();
    out->build_ids.clear();
    out->frames.emplace_back(std::move(frame_data));
    out->build_ids.emplace_back("");
    out->error = true;
    return false;
  }
  uint8_t* stack = reinterpret_cast<uint8_t*>(msg->payload);
  std::shared_ptr<unwindstack::Memory> mems =
      std::make_shared<StackOverlayMemory>(metadata->fd_mem,
                                           alloc_metadata->stack_pointer, stack,
                                           msg->payload_size);

  unwindstack::Unwinder unwinder(kMaxFrames, &metadata->fd_maps, regs.get(),
                                 mems);
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  unwinder.SetJitDebug(metadata->GetJitDebug(regs->Arch()));
  unwinder.SetDexFiles(metadata->GetDexFiles(regs->Arch()));
#endif
  // Suppress incorrect "variable may be uninitialized" error for the if
  // condition after this loop. error_code = LastErrorCode() gets run at least
  // once.
  unwindstack::ErrorCode error_code = unwindstack::ERROR_NONE;
  for (int attempt = 0; attempt < 2; ++attempt) {
    if (attempt > 0) {
      if (metadata->last_maps_reparse_time + kMapsReparseInterval >
          base::GetWallTimeMs()) {
        PERFETTO_DLOG("Skipping reparse due to rate limit.");
        break;
      }
      PERFETTO_DLOG("Reparsing maps");
      metadata->ReparseMaps();
      metadata->last_maps_reparse_time = base::GetWallTimeMs();
      // Regs got invalidated by libunwindstack's speculative jump.
      // Reset.
      ReadFromRawData(regs.get(), alloc_metadata->register_data);
      out->reparsed_map = true;
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
      unwinder.SetJitDebug(metadata->GetJitDebug(regs->Arch()));
      unwinder.SetDexFiles(metadata->GetDexFiles(regs->Arch()));
#endif
    }
    out->frames.swap(unwinder.frames());  // Provide the unwinder buffer to use.
    unwinder.Unwind(&kSkipMaps, /*map_suffixes_to_ignore=*/nullptr);
    out->frames.swap(unwinder.frames());  // Take the buffer back.
    error_code = unwinder.LastErrorCode();
    if (error_code != unwindstack::ERROR_INVALID_MAP &&
        (unwinder.warnings() & unwindstack::WARNING_DEX_PC_NOT_IN_MAP) == 0) {
      break;
    }
  }
  out->build_ids.resize(out->frames.size());
  for (size_t i = 0; i < out->frames.size(); ++i) {
    out->build_ids[i] = metadata->GetBuildId(out->frames[i]);
  }

  if (error_code != unwindstack::ERROR_NONE) {
    PERFETTO_DLOG("Unwinding error %" PRIu8, error_code);
    unwindstack::FrameData frame_data{};
    frame_data.function_name =
        "ERROR " + StringifyLibUnwindstackError(error_code);

    out->frames.emplace_back(std::move(frame_data));
    out->build_ids.emplace_back("");
    out->error = true;
  }
  return true;
}

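// Posts a task to the unwinding thread that shuts down all remaining client
// sockets and clears per-client state, then blocks until that task has run.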
UnwindingWorker::~UnwindingWorker() {
  if (thread_task_runner_.get() == nullptr) {
    return;
  }
  std::mutex mutex;
  std::condition_variable cv;

  std::unique_lock<std::mutex> lock(mutex);
  bool done = false;
  thread_task_runner_.PostTask([&mutex, &cv, &done, this] {
    for (auto& it : client_data_) {
      auto& client_data = it.second;
      client_data.sock->Shutdown(false);
    }
    client_data_.clear();

    std::lock_guard<std::mutex> inner_lock(mutex);
    done = true;
    cv.notify_one();
  });
  cv.wait(lock, [&done] { return done; });
}

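// Called when a client socket disconnects. If the client left unread data in
// its shared memory buffer, a DrainJob is scheduled to unwind that data
// before the disconnect is finalized.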
void UnwindingWorker::OnDisconnect(base::UnixSocket* self) {
  pid_t peer_pid = self->peer_pid_linux();
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    PERFETTO_DFATAL_OR_ELOG("Disconnected unexpected socket.");
    return;
  }

  ClientData& client_data = it->second;
  SharedRingBuffer& shmem = client_data.shmem;
  client_data.drain_bytes = shmem.read_avail();

  if (client_data.drain_bytes != 0) {
    DrainJob(peer_pid);
  } else {
    FinishDisconnect(it);
  }
}

void UnwindingWorker::RemoveClientData(
    std::map<pid_t, ClientData>::iterator client_data_iterator) {
  client_data_.erase(client_data_iterator);
  if (client_data_.empty()) {
    // We got rid of the last client. Flush and destruct AllocRecords in
    // arena. Disable the arena (will not accept returning borrowed records)
    // in case there are pending AllocRecords on the main thread.
    alloc_record_arena_.Disable();
  }
}

void UnwindingWorker::FinishDisconnect(
    std::map<pid_t, ClientData>::iterator client_data_iterator) {
  pid_t peer_pid = client_data_iterator->first;
  ClientData& client_data = client_data_iterator->second;
  SharedRingBuffer& shmem = client_data.shmem;

  if (!client_data.free_records.empty()) {
    delegate_->PostFreeRecord(this, std::move(client_data.free_records));
  }

  SharedRingBuffer::Stats stats = {};
  {
    auto lock = shmem.AcquireLock(ScopedSpinlock::Mode::Try);
    if (lock.locked())
      stats = shmem.GetStats(lock);
    else
      PERFETTO_ELOG("Failed to lock shmem to get stats.");
  }
  DataSourceInstanceID ds_id = client_data.data_source_instance_id;

  RemoveClientData(client_data_iterator);
  delegate_->PostSocketDisconnected(this, ds_id, peer_pid, stats);
}

void UnwindingWorker::OnDataAvailable(base::UnixSocket* self) {
  // Drain buffer to clear the notification.
  char recv_buf[kUnwindBatchSize];
  self->Receive(recv_buf, sizeof(recv_buf));
  BatchUnwindJob(self->peer_pid_linux());
}

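// Reads and unwinds up to kUnwindBatchSize records from the client's shared
// memory buffer. Returns kHasMore if the batch limit was hit or a maps
// reparse interrupted the batch, kReadSome if some records were processed,
// and kReadNone if the buffer was empty.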
UnwindingWorker::ReadAndUnwindBatchResult UnwindingWorker::ReadAndUnwindBatch(
    ClientData* client_data) {
  SharedRingBuffer& shmem = client_data->shmem;
  SharedRingBuffer::Buffer buf;
  ReadAndUnwindBatchResult res;

  size_t i;
  for (i = 0; i < kUnwindBatchSize; ++i) {
    uint64_t reparses_before = client_data->metadata.reparses;
    buf = shmem.BeginRead();
    if (!buf)
      break;
    HandleBuffer(this, &alloc_record_arena_, buf, client_data,
                 client_data->sock->peer_pid_linux(), delegate_);
    res.bytes_read += shmem.EndRead(std::move(buf));
    // Reparsing takes time, so process the rest in a new batch to avoid timing
    // out.
    if (reparses_before < client_data->metadata.reparses) {
      res.status = ReadAndUnwindBatchResult::Status::kHasMore;
      return res;
    }
  }

  if (i == kUnwindBatchSize) {
    res.status = ReadAndUnwindBatchResult::Status::kHasMore;
  } else if (i > 0) {
    res.status = ReadAndUnwindBatchResult::Status::kReadSome;
  } else {
    res.status = ReadAndUnwindBatchResult::Status::kReadNone;
  }
  return res;
}

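// Recurring task that processes one batch for a connected client. Depending
// on how much data was read, it either reposts itself (immediately or after
// kRetryDelayMs) or pauses the reader so the client notifies us when new data
// is written.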
void UnwindingWorker::BatchUnwindJob(pid_t peer_pid) {
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    // This can happen if the client disconnected before the buffer was fully
    // handled.
    PERFETTO_DLOG("Unexpected data.");
    return;
  }
  ClientData& client_data = it->second;
  if (client_data.drain_bytes != 0) {
    // This process disconnected and we're reading out the remainder of its
    // buffered data in a dedicated recurring task (DrainJob), so this task has
    // nothing to do.
    return;
  }

  bool job_reposted = false;
  bool reader_paused = false;
  switch (ReadAndUnwindBatch(&client_data).status) {
    case ReadAndUnwindBatchResult::Status::kHasMore:
      thread_task_runner_.get()->PostTask(
          [this, peer_pid] { BatchUnwindJob(peer_pid); });
      job_reposted = true;
      break;
    case ReadAndUnwindBatchResult::Status::kReadSome:
      thread_task_runner_.get()->PostDelayedTask(
          [this, peer_pid] { BatchUnwindJob(peer_pid); }, kRetryDelayMs);
      job_reposted = true;
      break;
    case ReadAndUnwindBatchResult::Status::kReadNone:
      client_data.shmem.SetReaderPaused();
      reader_paused = true;
      break;
  }

  // We need to either repost the job, or set the reader paused bit. By
  // setting that bit, we inform the client that we want to be notified when
  // new data is written to the shared memory buffer.
  // If we do neither of these things, we will not read from the shared memory
  // buffer again.
  PERFETTO_CHECK(job_reposted || reader_paused);
}

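// Recurring task that drains the remaining drain_bytes of a disconnected
// client's shared memory buffer and then finishes the disconnect.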
void UnwindingWorker::DrainJob(pid_t peer_pid) {
  auto it = client_data_.find(peer_pid);
  if (it == client_data_.end()) {
    return;
  }
  ClientData& client_data = it->second;
  auto res = ReadAndUnwindBatch(&client_data);
  switch (res.status) {
    case ReadAndUnwindBatchResult::Status::kHasMore:
      if (res.bytes_read < client_data.drain_bytes) {
        client_data.drain_bytes -= res.bytes_read;
        thread_task_runner_.get()->PostTask(
            [this, peer_pid] { DrainJob(peer_pid); });
        return;
      }
      // ReadAndUnwindBatch read more than client_data.drain_bytes.
      break;
    case ReadAndUnwindBatchResult::Status::kReadSome:
      // ReadAndUnwindBatch read all the available data (for now) in the shared
      // memory buffer.
    case ReadAndUnwindBatchResult::Status::kReadNone:
      // There was no data in the shared memory buffer.
      break;
  }
  // No further drain task has been scheduled. Drain is finished. Finish the
  // disconnect operation as well.

  FinishDisconnect(it);
}

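// Parses a single wire message from the shared memory buffer and dispatches
// it: Malloc records are unwound via DoUnwind (skipped when the client
// streams raw allocations) and posted to the delegate, Free records are
// batched per client and flushed every kRecordBatchSize entries, and HeapName
// records are forwarded immediately.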
// static
void UnwindingWorker::HandleBuffer(UnwindingWorker* self,
                                   AllocRecordArena* alloc_record_arena,
                                   const SharedRingBuffer::Buffer& buf,
                                   ClientData* client_data,
                                   pid_t peer_pid,
                                   Delegate* delegate) {
  UnwindingMetadata* unwinding_metadata = &client_data->metadata;
  DataSourceInstanceID data_source_instance_id =
      client_data->data_source_instance_id;
  WireMessage msg;
  // TODO(fmayer): standardise on char* or uint8_t*.
  // char* has stronger guarantees regarding aliasing.
  // see https://timsong-cpp.github.io/cppwp/n3337/basic.lval#10.8
  if (!ReceiveWireMessage(reinterpret_cast<char*>(buf.data), buf.size, &msg)) {
    PERFETTO_DFATAL_OR_ELOG("Failed to receive wire message.");
    return;
  }

  if (msg.record_type == RecordType::Malloc) {
    std::unique_ptr<AllocRecord> rec = alloc_record_arena->BorrowAllocRecord();
    rec->alloc_metadata = *msg.alloc_header;
    rec->pid = peer_pid;
    rec->data_source_instance_id = data_source_instance_id;
    auto start_time_us = base::GetWallTimeNs() / 1000;
    if (!client_data->stream_allocations)
      DoUnwind(&msg, unwinding_metadata, rec.get());
    rec->unwinding_time_us = static_cast<uint64_t>(
        ((base::GetWallTimeNs() / 1000) - start_time_us).count());
    delegate->PostAllocRecord(self, std::move(rec));
  } else if (msg.record_type == RecordType::Free) {
    FreeRecord rec;
    rec.pid = peer_pid;
    rec.data_source_instance_id = data_source_instance_id;
    // We need to copy this, so we can return the memory to the shmem buffer.
    memcpy(&rec.entry, msg.free_header, sizeof(*msg.free_header));
    client_data->free_records.emplace_back(std::move(rec));
    if (client_data->free_records.size() == kRecordBatchSize) {
      delegate->PostFreeRecord(self, std::move(client_data->free_records));
      client_data->free_records.clear();
      client_data->free_records.reserve(kRecordBatchSize);
    }
  } else if (msg.record_type == RecordType::HeapName) {
    HeapNameRecord rec;
    rec.pid = peer_pid;
    rec.data_source_instance_id = data_source_instance_id;
    memcpy(&rec.entry, msg.heap_name_header, sizeof(*msg.heap_name_header));
    rec.entry.heap_name[sizeof(rec.entry.heap_name) - 1] = '\0';
    delegate->PostHeapNameRecord(self, std::move(rec));
  } else {
    PERFETTO_DFATAL_OR_ELOG("Invalid record type.");
  }
}

void UnwindingWorker::PostHandoffSocket(HandoffData handoff_data) {
  // Even with C++14, this cannot be moved, as std::function has to be
  // copyable, which HandoffData is not.
  HandoffData* raw_data = new HandoffData(std::move(handoff_data));
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask([this, raw_data] {
    HandoffData data = std::move(*raw_data);
    delete raw_data;
    HandleHandoffSocket(std::move(data));
  });
}

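// Runs on the unwinding thread: adopts the socket handed off by the main
// thread, builds the per-client unwinding state (maps/mem fds, shared memory
// buffer) and registers it in client_data_.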
void UnwindingWorker::HandleHandoffSocket(HandoffData handoff_data) {
  auto sock = base::UnixSocket::AdoptConnected(
      handoff_data.sock.ReleaseFd(), this, this->thread_task_runner_.get(),
      base::SockFamily::kUnix, base::SockType::kStream);
  pid_t peer_pid = sock->peer_pid_linux();

  UnwindingMetadata metadata(std::move(handoff_data.maps_fd),
                             std::move(handoff_data.mem_fd));
  ClientData client_data{
      handoff_data.data_source_instance_id,
      std::move(sock),
      std::move(metadata),
      std::move(handoff_data.shmem),
      std::move(handoff_data.client_config),
      handoff_data.stream_allocations,
      /*drain_bytes=*/0,
      /*free_records=*/{},
  };
  client_data.free_records.reserve(kRecordBatchSize);
  client_data.shmem.SetReaderPaused();
  client_data_.emplace(peer_pid, std::move(client_data));
  alloc_record_arena_.Enable();
}

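// Flushes any batched free records for |pid| to the delegate and then signals
// that the drain request has been handled.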
void UnwindingWorker::HandleDrainFree(DataSourceInstanceID ds_id, pid_t pid) {
  auto it = client_data_.find(pid);
  if (it != client_data_.end()) {
    ClientData& client_data = it->second;

    if (!client_data.free_records.empty()) {
      delegate_->PostFreeRecord(this, std::move(client_data.free_records));
      client_data.free_records.clear();
      client_data.free_records.reserve(kRecordBatchSize);
    }
  }
  delegate_->PostDrainDone(this, ds_id);
}

void UnwindingWorker::PostDisconnectSocket(pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask(
      [this, pid] { HandleDisconnectSocket(pid); });
}

void UnwindingWorker::PostPurgeProcess(pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask([this, pid] {
    auto it = client_data_.find(pid);
    if (it == client_data_.end()) {
      return;
    }
    RemoveClientData(it);
  });
}

void UnwindingWorker::PostDrainFree(DataSourceInstanceID ds_id, pid_t pid) {
  // We do not need to use a WeakPtr here because the task runner will not
  // outlive its UnwindingWorker.
  thread_task_runner_.get()->PostTask(
      [this, ds_id, pid] { HandleDrainFree(ds_id, pid); });
}

void UnwindingWorker::HandleDisconnectSocket(pid_t pid) {
  auto it = client_data_.find(pid);
  if (it == client_data_.end()) {
    // This is expected if the client voluntarily disconnects before the
    // profiling session ended. In that case, there is a race between the main
    // thread learning about the disconnect and it calling back here.
    return;
  }
  ClientData& client_data = it->second;
  // Shutdown and call OnDisconnect handler.
  client_data.shmem.SetShuttingDown();
  client_data.sock->Shutdown(/* notify= */ true);
}

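// Hands out an AllocRecord, reusing one previously returned to the arena if
// available and allocating a new one otherwise.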
std::unique_ptr<AllocRecord> AllocRecordArena::BorrowAllocRecord() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  if (!alloc_records_.empty()) {
    std::unique_ptr<AllocRecord> result = std::move(alloc_records_.back());
    alloc_records_.pop_back();
    return result;
  }
  return std::unique_ptr<AllocRecord>(new AllocRecord());
}

void AllocRecordArena::ReturnAllocRecord(std::unique_ptr<AllocRecord> record) {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  if (enabled_ && record && alloc_records_.size() < kMaxAllocRecordArenaSize)
    alloc_records_.emplace_back(std::move(record));
}

void AllocRecordArena::Disable() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  alloc_records_.clear();
  enabled_ = false;
}

void AllocRecordArena::Enable() {
  std::lock_guard<std::mutex> l(*alloc_records_mutex_);
  enabled_ = true;
}

UnwindingWorker::Delegate::~Delegate() = default;

}  // namespace profiling
}  // namespace perfetto