/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_PROFILING_MEMORY_UNWINDING_H_
#define SRC_PROFILING_MEMORY_UNWINDING_H_

#include <unwindstack/Regs.h>

#include "perfetto/base/time.h"
#include "perfetto/ext/base/scoped_file.h"
#include "perfetto/ext/base/thread_task_runner.h"
#include "perfetto/ext/tracing/core/basic_types.h"
#include "src/profiling/common/unwind_support.h"
#include "src/profiling/memory/bookkeeping.h"
#include "src/profiling/memory/unwound_messages.h"
#include "src/profiling/memory/wire_protocol.h"

namespace perfetto {
namespace profiling {

std::unique_ptr<unwindstack::Regs> CreateRegsFromRawData(
    unwindstack::ArchEnum arch,
    void* raw_data);

bool DoUnwind(WireMessage*, UnwindingMetadata* metadata, AllocRecord* out);

// AllocRecords are expensive to construct and destruct. We have seen up to
// 10 % of total CPU of heapprofd being used to destruct them. That is why
// we re-use them to cut CPU usage significantly.
class AllocRecordArena {
 public:
  AllocRecordArena() : alloc_records_mutex_(new std::mutex()) {}

  void ReturnAllocRecord(std::unique_ptr<AllocRecord>);
  std::unique_ptr<AllocRecord> BorrowAllocRecord();

  void Enable();
  void Disable();

 private:
  std::unique_ptr<std::mutex> alloc_records_mutex_;
  std::vector<std::unique_ptr<AllocRecord>> alloc_records_;
  bool enabled_ = true;
};
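
// A minimal borrow/return sketch (hypothetical caller, for illustration only;
// within this header the production user is UnwindingWorker below, which
// borrows a record, unwinds into it and posts it to its Delegate, which
// eventually returns it):
//
//   AllocRecordArena arena;
//   std::unique_ptr<AllocRecord> rec = arena.BorrowAllocRecord();
//   // ... DoUnwind(&msg, &metadata, rec.get()) fills in *rec ...
//   arena.ReturnAllocRecord(std::move(rec));  // recycled, not destroyed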

class UnwindingWorker : public base::UnixSocket::EventListener {
 public:
  class Delegate {
   public:
    virtual void PostAllocRecord(UnwindingWorker*,
                                 std::unique_ptr<AllocRecord>) = 0;
    virtual void PostFreeRecord(UnwindingWorker*, std::vector<FreeRecord>) = 0;
    virtual void PostHeapNameRecord(UnwindingWorker*, HeapNameRecord rec) = 0;
    virtual void PostSocketDisconnected(UnwindingWorker*,
                                        DataSourceInstanceID,
                                        pid_t pid,
                                        SharedRingBuffer::Stats stats) = 0;
    virtual ~Delegate();
  };

  struct HandoffData {
    DataSourceInstanceID data_source_instance_id;
    base::UnixSocketRaw sock;
    base::ScopedFile maps_fd;
    base::ScopedFile mem_fd;
    SharedRingBuffer shmem;
    ClientConfiguration client_config;
    bool stream_allocations;
  };

  UnwindingWorker(Delegate* delegate, base::ThreadTaskRunner thread_task_runner)
      : delegate_(delegate),
        thread_task_runner_(std::move(thread_task_runner)) {}

  ~UnwindingWorker() override;
  UnwindingWorker(UnwindingWorker&&) = default;

  // Public API safe to call from other threads.
  void PostDisconnectSocket(pid_t pid);
  void PostHandoffSocket(HandoffData);
  void ReturnAllocRecord(std::unique_ptr<AllocRecord> record) {
    alloc_record_arena_.ReturnAllocRecord(std::move(record));
  }

  // Implementation of UnixSocket::EventListener.
  // Do not call explicitly.
  void OnDisconnect(base::UnixSocket* self) override;
  void OnNewIncomingConnection(base::UnixSocket*,
                               std::unique_ptr<base::UnixSocket>) override {
    PERFETTO_DFATAL_OR_ELOG("This should not happen.");
  }
  void OnDataAvailable(base::UnixSocket* self) override;

 public:
  // public for testing/fuzzer
  struct ClientData {
    DataSourceInstanceID data_source_instance_id;
    std::unique_ptr<base::UnixSocket> sock;
    UnwindingMetadata metadata;
    SharedRingBuffer shmem;
    ClientConfiguration client_config;
    bool stream_allocations;
    std::vector<FreeRecord> free_records;
  };

  // public for testing/fuzzing
  static void HandleBuffer(UnwindingWorker* self,
                           AllocRecordArena* alloc_record_arena,
                           const SharedRingBuffer::Buffer& buf,
                           ClientData* client_data,
                           pid_t peer_pid,
                           Delegate* delegate);

 private:
  void HandleHandoffSocket(HandoffData data);
  void HandleDisconnectSocket(pid_t pid);
  std::unique_ptr<AllocRecord> BorrowAllocRecord();

  enum class ReadAndUnwindBatchResult {
    kHasMore,
    kReadSome,
    kReadNone,
  };
  ReadAndUnwindBatchResult ReadAndUnwindBatch(ClientData* client_data);
  void BatchUnwindJob(pid_t);

  AllocRecordArena alloc_record_arena_;
  std::map<pid_t, ClientData> client_data_;
  Delegate* delegate_;

  // Task runner with a dedicated thread. Keep last. By destroying this task
  // runner first, we ensure that the UnwindingWorker is not active while the
  // rest of its state is being destroyed. Additionally this ensures that the
  // destructing thread sees a consistent view of the memory due to the
  // ThreadTaskRunner's destructor joining a thread.
  base::ThreadTaskRunner thread_task_runner_;
};

}  // namespace profiling
}  // namespace perfetto

#endif  // SRC_PROFILING_MEMORY_UNWINDING_H_
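
// A minimal wiring sketch (hypothetical; MyDelegate and handoff_data are
// illustrative names, not part of this header, and the example assumes
// base::ThreadTaskRunner::CreateAndStart() from the included
// thread_task_runner.h). The service side implements Delegate to receive
// unwound records, runs the worker on a dedicated thread, and hands each
// connected client's socket, fds and shared memory over via
// PostHandoffSocket():
//
//   class MyDelegate : public UnwindingWorker::Delegate {
//    public:
//     void PostAllocRecord(UnwindingWorker* w,
//                          std::unique_ptr<AllocRecord> rec) override {
//       // ... consume *rec, then hand it back so it can be re-used ...
//       w->ReturnAllocRecord(std::move(rec));
//     }
//     void PostFreeRecord(UnwindingWorker*, std::vector<FreeRecord>) override {
//     }
//     void PostHeapNameRecord(UnwindingWorker*, HeapNameRecord) override {}
//     void PostSocketDisconnected(UnwindingWorker*,
//                                 DataSourceInstanceID,
//                                 pid_t,
//                                 SharedRingBuffer::Stats) override {}
//   };
//
//   MyDelegate delegate;
//   UnwindingWorker worker(&delegate,
//                          base::ThreadTaskRunner::CreateAndStart("unwind"));
//   worker.PostHandoffSocket(std::move(handoff_data));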