• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2018 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #ifndef SRC_PROFILING_MEMORY_UNWINDING_H_
18 #define SRC_PROFILING_MEMORY_UNWINDING_H_
19 
#include <sys/types.h>

#include <map>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

#include <unwindstack/Regs.h>

#include "perfetto/base/time.h"
#include "perfetto/ext/base/scoped_file.h"
#include "perfetto/ext/base/thread_task_runner.h"
#include "perfetto/ext/tracing/core/basic_types.h"
#include "src/profiling/common/unwind_support.h"
#include "src/profiling/memory/bookkeeping.h"
#include "src/profiling/memory/unwound_messages.h"
#include "src/profiling/memory/wire_protocol.h"
30 
31 namespace perfetto {
32 namespace profiling {
33 
// Builds an unwindstack::Regs instance for architecture |arch| from the raw
// register state in |raw_data|.
// NOTE(review): the expected layout of |raw_data| is arch-dependent and
// defined by the wire protocol / the .cc — confirm there before use.
std::unique_ptr<unwindstack::Regs> CreateRegsFromRawData(
    unwindstack::ArchEnum arch,
    void* raw_data);

// Unwinds the callstack carried by the WireMessage, using |metadata| (the
// per-process unwinding state), and writes the result into |out|.
// NOTE(review): return value presumably indicates unwinding success; the
// definition lives in the .cc — verify before relying on it.
bool DoUnwind(WireMessage*, UnwindingMetadata* metadata, AllocRecord* out);
39 
40 // AllocRecords are expensive to construct and destruct. We have seen up to
41 // 10 % of total CPU of heapprofd being used to destruct them. That is why
42 // we re-use them to cut CPU usage significantly.
43 class AllocRecordArena {
44  public:
AllocRecordArena()45   AllocRecordArena() : alloc_records_mutex_(new std::mutex()) {}
46 
47   void ReturnAllocRecord(std::unique_ptr<AllocRecord>);
48   std::unique_ptr<AllocRecord> BorrowAllocRecord();
49 
50   void Enable();
51   void Disable();
52 
53  private:
54   std::unique_ptr<std::mutex> alloc_records_mutex_;
55   std::vector<std::unique_ptr<AllocRecord>> alloc_records_;
56   bool enabled_ = true;
57 };
58 
// Performs callstack unwinding for connected heapprofd clients on a
// dedicated thread (owned via |thread_task_runner_|). Work is submitted
// through the Post* methods, which are safe to call from other threads, and
// results are reported back through the Delegate interface.
class UnwindingWorker : public base::UnixSocket::EventListener {
 public:
  // Callbacks through which the worker hands results to its owner.
  // NOTE(review): which thread these are invoked on is not visible in this
  // header — confirm against the .cc before relying on it.
  class Delegate {
   public:
    virtual void PostAllocRecord(UnwindingWorker*,
                                 std::unique_ptr<AllocRecord>) = 0;
    virtual void PostFreeRecord(UnwindingWorker*, std::vector<FreeRecord>) = 0;
    virtual void PostHeapNameRecord(UnwindingWorker*, HeapNameRecord rec) = 0;
    virtual void PostSocketDisconnected(UnwindingWorker*,
                                        DataSourceInstanceID,
                                        pid_t pid,
                                        SharedRingBuffer::Stats stats) = 0;
    virtual void PostDrainDone(UnwindingWorker*, DataSourceInstanceID) = 0;
    virtual ~Delegate();
  };

  // Bundle of state handed to this worker (via PostHandoffSocket) when it
  // takes over a client connection: the client socket, the fds used for
  // unwinding, and the shared-memory ring buffer the client writes into.
  struct HandoffData {
    DataSourceInstanceID data_source_instance_id;
    base::UnixSocketRaw sock;
    base::ScopedFile maps_fd;
    base::ScopedFile mem_fd;
    SharedRingBuffer shmem;
    ClientConfiguration client_config;
    bool stream_allocations;
  };

  UnwindingWorker(Delegate* delegate, base::ThreadTaskRunner thread_task_runner)
      : delegate_(delegate),
        thread_task_runner_(std::move(thread_task_runner)) {}

  ~UnwindingWorker() override;
  UnwindingWorker(UnwindingWorker&&) = default;

  // Public API safe to call from other threads.
  void PostDisconnectSocket(pid_t pid);
  void PostPurgeProcess(pid_t pid);
  void PostHandoffSocket(HandoffData);
  void PostDrainFree(DataSourceInstanceID, pid_t pid);
  // Returns an AllocRecord to the arena for re-use (see AllocRecordArena).
  void ReturnAllocRecord(std::unique_ptr<AllocRecord> record) {
    alloc_record_arena_.ReturnAllocRecord(std::move(record));
  }

  // Implementation of UnixSocket::EventListener.
  // Do not call explicitly.
  void OnDisconnect(base::UnixSocket* self) override;
  // Sockets are adopted via PostHandoffSocket; this worker never acts as a
  // listening socket, so an incoming connection is a programming error.
  void OnNewIncomingConnection(base::UnixSocket*,
                               std::unique_ptr<base::UnixSocket>) override {
    PERFETTO_DFATAL_OR_ELOG("This should not happen.");
  }
  void OnDataAvailable(base::UnixSocket* self) override;

 public:
  // public for testing/fuzzer
  // Per-client state kept by the worker for each connected process.
  struct ClientData {
    DataSourceInstanceID data_source_instance_id;
    std::unique_ptr<base::UnixSocket> sock;
    UnwindingMetadata metadata;
    SharedRingBuffer shmem;
    ClientConfiguration client_config;
    bool stream_allocations = false;
    size_t drain_bytes = 0;
    std::vector<FreeRecord> free_records;
  };

  // public for testing/fuzzing
  static void HandleBuffer(UnwindingWorker* self,
                           AllocRecordArena* alloc_record_arena,
                           const SharedRingBuffer::Buffer& buf,
                           ClientData* client_data,
                           pid_t peer_pid,
                           Delegate* delegate);

 private:
  void HandleHandoffSocket(HandoffData data);
  void HandleDisconnectSocket(pid_t pid);
  void HandleDrainFree(DataSourceInstanceID, pid_t);
  void RemoveClientData(
      std::map<pid_t, ClientData>::iterator client_data_iterator);
  void FinishDisconnect(
      std::map<pid_t, ClientData>::iterator client_data_iterator);
  std::unique_ptr<AllocRecord> BorrowAllocRecord();

  // Outcome of one batch of reads from a client's shared memory buffer.
  // NOTE(review): the precise meaning of the Status values is inferred from
  // their names; confirm against ReadAndUnwindBatch in the .cc.
  struct ReadAndUnwindBatchResult {
    enum class Status {
      kHasMore,
      kReadSome,
      kReadNone,
    };
    size_t bytes_read = 0;
    Status status;
  };
  ReadAndUnwindBatchResult ReadAndUnwindBatch(ClientData* client_data);
  void BatchUnwindJob(pid_t);
  void DrainJob(pid_t);

  AllocRecordArena alloc_record_arena_;
  std::map<pid_t, ClientData> client_data_;
  Delegate* delegate_;  // Not owned; must outlive this worker.

  // Task runner with a dedicated thread. Keep last. By destroying this task
  // runner first, we ensure that the UnwindingWorker is not active while the
  // rest of its state is being destroyed. Additionally this ensures that the
  // destructing thread sees a consistent view of the memory due to the
  // ThreadTaskRunner's destructor joining a thread.
  base::ThreadTaskRunner thread_task_runner_;
};
165 
166 }  // namespace profiling
167 }  // namespace perfetto
168 
169 #endif  // SRC_PROFILING_MEMORY_UNWINDING_H_
170