// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

#include "base/macros.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

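// A TraceBuffer that recycles a fixed pool of chunks in FIFO order: once every
// chunk index has been handed out, the least recently returned chunk is reset
// and reused. Consequently the buffer never reports itself as full.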
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return nullptr;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return nullptr;
  }

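  // Sums this buffer's own footprint plus the overhead of every chunk
  // currently queued for recycling; indices that were never materialized as
  // chunks are skipped.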
  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

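  // Number of entries currently in the circular queue, accounting for
  // wrap-around of the head and tail indices.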
  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

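// A TraceBuffer that appends chunks until max_chunks_ is reached and then
// reports itself as full; chunks are never recycled.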
class TraceBufferVector : public TraceBuffer {
 public:
  TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    // Put nullptr in the slot of an in-flight chunk.
    chunks_.push_back(nullptr);
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::unique_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = std::move(chunk);
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return nullptr;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return nullptr;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
      if (chunk)
        return chunk;
    }
    return nullptr;
  }

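  // Accounts for the pointer storage of |chunks_| (allocated capacity vs.
  // resident size) plus the overhead of every completed chunk.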
  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
                  chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i].get();
      // Skip the in-flight (nullptr) chunks. They will be accounted for by the
      // per-thread dumpers; see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace

TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() = default;

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

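// Overhead estimates are built up incrementally: each event's contribution is
// computed once and folded into |cached_overhead_estimate_|, so repeated dumps
// of a full chunk are cheap.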
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount(
          TraceEventMemoryOverhead::kTraceEvent);
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

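// TraceResultBuffer stitches JSON trace fragments into one JSON array,
// emitting "[", comma separators between fragments, and a closing "]" through
// the output callback.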
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() = default;

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}
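
// Usage sketch (illustrative; in Chromium the actual caller is TraceLog): a
// producer leases a chunk, fills it with events, and hands it back:
//
//   size_t chunk_index;
//   std::unique_ptr<TraceBufferChunk> chunk = buffer->GetChunk(&chunk_index);
//   size_t event_index;
//   TraceEvent* event = chunk->AddTraceEvent(&event_index);
//   // ... populate |event| ...
//   buffer->ReturnChunk(chunk_index, std::move(chunk));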

}  // namespace trace_event
}  // namespace base