// Copyright 2015 The Chromium Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifdef UNSAFE_BUFFERS_BUILD
// TODO(crbug.com/40284755): Remove this and spanify to fix the errors.
#pragma allow_unsafe_buffers
#endif

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/containers/heap_array.h"
#include "base/functional/bind.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

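// A TraceBuffer with a fixed budget of |max_chunks_| chunks that are recycled
// oldest-first once the budget is exhausted, so only the most recent trace
// data is kept. |recyclable_chunks_queue_| is a circular queue of chunk
// indices: GetChunk() pops the oldest index from the head and hands its chunk
// out for writing (leaving nullptr in that slot), and ReturnChunk() pushes the
// index back at the tail once the writer is done with it.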
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  explicit TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(HeapArray<size_t>::Uninit(queue_capacity())),
        queue_tail_(max_chunks) {
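    // Initially every chunk index (0 .. max_chunks - 1) is available for
    // recycling: the queue head starts at 0 and the tail at max_chunks.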
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  TraceBufferRingBuffer(const TraceBufferRingBuffer&) = delete;
  TraceBufferRingBuffer& operator=(const TraceBufferRingBuffer&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
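    // Reading via NextChunk() restarts from the new head, i.e. from the
    // oldest chunk index that is still queued.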
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue cannot be full because it has
    // enough capacity to hold all chunks, including the one being returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
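    // For example, with max_chunks_ == 2 the queue has 3 slots: head == tail
    // means empty, and QueueSize() == 2 (i.e. capacity - 1) means full.
    // Without the spare slot, head == tail would be ambiguous between the
    // empty and full states.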
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  HeapArray<size_t> recyclable_chunks_queue_;
  size_t queue_head_ = 0;
  size_t queue_tail_;

  size_t current_iteration_index_ = 0;
  uint32_t current_chunk_seq_ = 1;
};

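// A TraceBuffer that appends chunks to a vector until |max_chunks_| have been
// handed out and then reports IsFull(). Unlike TraceBufferRingBuffer it never
// recycles chunks, although GetChunk() still grows past the limit so that
// metadata events and thread-local buffers can be flushed (see the comment in
// GetChunk() below).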
class TraceBufferVector : public TraceBuffer {
 public:
  explicit TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  TraceBufferVector(const TraceBufferVector&) = delete;
  TraceBufferVector& operator=(const TraceBufferVector&) = delete;

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::make_unique<TraceBufferChunk>(static_cast<uint32_t>(*index) +
                                              1);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
                  chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by
      // the per-thread-local dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
};

}  // namespace

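// A TraceBufferChunk owns a fixed-size array of TraceEvent slots
// (kTraceBufferChunkSize of them). AddTraceEvent() hands the slots out in
// order, and Reset() recycles the whole chunk under a new sequence number.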
TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() = default;

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

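// Estimates are cached per chunk: TraceEvents measured on a previous call are
// not walked again; only events added since then are. Unused TraceEvent slots
// are not cached (they shrink as the chunk fills up) and are reported on the
// fly below instead.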
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_ = std::make_unique<TraceEventMemoryOverhead>();

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount(
          TraceEventMemoryOverhead::kTraceEvent);
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

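// TraceResultBuffer stitches flushed trace fragments into a single JSON array
// via the output callback: Start() emits "[", each AddFragment() emits a comma
// (after the first fragment) followed by the fragment, and Finish() emits "]".
// Illustrative usage sketch (fragment_json is a placeholder):
//
//   TraceResultBuffer buffer;
//   TraceResultBuffer::SimpleOutput output;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();
//   buffer.AddFragment(fragment_json);  // Once per flushed fragment.
//   buffer.Finish();
//   // output.json_output now holds "[" + fragment_json + "]".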
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return BindRepeating(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() = default;

void TraceResultBuffer::SetOutputCallback(OutputCallback json_chunk_callback) {
  output_callback_ = std::move(json_chunk_callback);
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base