// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_buffer.h"

#include <memory>
#include <utility>
#include <vector>

11 #include "base/macros.h"
12 #include "base/trace_event/heap_profiler.h"
13 #include "base/trace_event/trace_event_impl.h"

namespace base {
namespace trace_event {

namespace {

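// A TraceBuffer with a fixed set of chunks that are recycled in FIFO order:
// once every chunk has been handed out, the least recently returned chunk is
// reused, so the oldest trace events get overwritten first. Because chunks
// are always recycled, IsFull() always returns false.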
class TraceBufferRingBuffer : public TraceBuffer {
 public:
  explicit TraceBufferRingBuffer(size_t max_chunks)
      : max_chunks_(max_chunks),
        recyclable_chunks_queue_(new size_t[queue_capacity()]),
        queue_head_(0),
        queue_tail_(max_chunks),
        current_iteration_index_(0),
        current_chunk_seq_(1) {
    chunks_.reserve(max_chunks);
    for (size_t i = 0; i < max_chunks; ++i)
      recyclable_chunks_queue_[i] = i;
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // Because the number of threads is much less than the number of chunks,
    // the queue should never be empty.
    DCHECK(!QueueIsEmpty());

    *index = recyclable_chunks_queue_[queue_head_];
    queue_head_ = NextQueueIndex(queue_head_);
    current_iteration_index_ = queue_head_;

    if (*index >= chunks_.size())
      chunks_.resize(*index + 1);

    TraceBufferChunk* chunk = chunks_[*index].release();
    chunks_[*index] = NULL;  // Put NULL in the slot of an in-flight chunk.
    if (chunk)
      chunk->Reset(current_chunk_seq_++);
    else
      chunk = new TraceBufferChunk(current_chunk_seq_++);

    return std::unique_ptr<TraceBufferChunk>(chunk);
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    // When this method is called, the queue should not be full because it
    // can contain all chunks including the one to be returned.
    DCHECK(!QueueIsFull());
    DCHECK(chunk);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    chunks_[index] = std::move(chunk);
    recyclable_chunks_queue_[queue_tail_] = index;
    queue_tail_ = NextQueueIndex(queue_tail_);
  }

  bool IsFull() const override { return false; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
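    // The sequence number changes every time a chunk is recycled, so a
    // mismatch means the chunk has been reused since |handle| was issued.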
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    if (chunks_.empty())
      return NULL;

    while (current_iteration_index_ != queue_tail_) {
      size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
      current_iteration_index_ = NextQueueIndex(current_iteration_index_);
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      DCHECK(chunks_[chunk_index]);
      return chunks_[chunk_index].get();
    }
    return NULL;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
         queue_index = NextQueueIndex(queue_index)) {
      size_t chunk_index = recyclable_chunks_queue_[queue_index];
      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
        continue;
      chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }

  size_t QueueSize() const {
    return queue_tail_ > queue_head_
               ? queue_tail_ - queue_head_
               : queue_tail_ + queue_capacity() - queue_head_;
  }

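  // The queue is full when only the extra slot reserved to distinguish full
  // from empty (see queue_capacity()) remains free.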
  bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }

  size_t queue_capacity() const {
    // One extra space to help distinguish full state and empty state.
    return max_chunks_ + 1;
  }

  size_t NextQueueIndex(size_t index) const {
    index++;
    if (index >= queue_capacity())
      index = 0;
    return index;
  }

  size_t max_chunks_;
  std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;

  std::unique_ptr<size_t[]> recyclable_chunks_queue_;
  size_t queue_head_;
  size_t queue_tail_;

  size_t current_iteration_index_;
  uint32_t current_chunk_seq_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};

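// A TraceBuffer that appends chunks to a vector and never recycles them. The
// buffer reports itself as full once max_chunks_ chunks have been created,
// although GetChunk() will still hand out chunks past that point so that
// metadata events and thread-local buffers can be flushed.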
class TraceBufferVector : public TraceBuffer {
 public:
  explicit TraceBufferVector(size_t max_chunks)
      : in_flight_chunk_count_(0),
        current_iteration_index_(0),
        max_chunks_(max_chunks) {
    chunks_.reserve(max_chunks_);
  }

  std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
    HEAP_PROFILER_SCOPED_IGNORE;

    // This function may be called when adding normal events or indirectly from
    // AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
    // have to add the metadata events and flush thread-local buffers even if
    // the buffer is full.
    *index = chunks_.size();
    chunks_.push_back(NULL);  // Put NULL in the slot of an in-flight chunk.
    ++in_flight_chunk_count_;
    // + 1 because zero chunk_seq is not allowed.
    return std::unique_ptr<TraceBufferChunk>(
        new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
  }

  void ReturnChunk(size_t index,
                   std::unique_ptr<TraceBufferChunk> chunk) override {
    DCHECK_GT(in_flight_chunk_count_, 0u);
    DCHECK_LT(index, chunks_.size());
    DCHECK(!chunks_[index]);
    --in_flight_chunk_count_;
    chunks_[index] = chunk.release();
  }

  bool IsFull() const override { return chunks_.size() >= max_chunks_; }

  size_t Size() const override {
    // This is approximate because not all of the chunks are full.
    return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
  }

  size_t Capacity() const override {
    return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
  }

  TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
    if (handle.chunk_index >= chunks_.size())
      return NULL;
    TraceBufferChunk* chunk = chunks_[handle.chunk_index];
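    // Chunks are never recycled in this buffer; the sequence number is simply
    // the chunk index + 1 assigned in GetChunk(), so a mismatch means the
    // handle was not issued for this chunk.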
    if (!chunk || chunk->seq() != handle.chunk_seq)
      return NULL;
    return chunk->GetEventAt(handle.event_index);
  }

  const TraceBufferChunk* NextChunk() override {
    while (current_iteration_index_ < chunks_.size()) {
      // Skip in-flight chunks.
      const TraceBufferChunk* chunk = chunks_[current_iteration_index_++];
      if (chunk)
        return chunk;
    }
    return NULL;
  }

  void EstimateTraceMemoryOverhead(
      TraceEventMemoryOverhead* overhead) override {
    const size_t chunks_ptr_vector_allocated_size =
        sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
    const size_t chunks_ptr_vector_resident_size =
        sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
                  chunks_ptr_vector_resident_size);
    for (size_t i = 0; i < chunks_.size(); ++i) {
      TraceBufferChunk* chunk = chunks_[i];
      // Skip the in-flight (nullptr) chunks. They will be accounted by the
      // per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
      if (chunk)
        chunk->EstimateTraceMemoryOverhead(overhead);
    }
  }

 private:
  size_t in_flight_chunk_count_;
  size_t current_iteration_index_;
  size_t max_chunks_;
  ScopedVector<TraceBufferChunk> chunks_;

  DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};

}  // namespace


TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}

TraceBufferChunk::~TraceBufferChunk() {}

void TraceBufferChunk::Reset(uint32_t new_seq) {
  for (size_t i = 0; i < next_free_; ++i)
    chunk_[i].Reset();
  next_free_ = 0;
  seq_ = new_seq;
  cached_overhead_estimate_.reset();
}

TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
  DCHECK(!IsFull());
  *event_index = next_free_++;
  return &chunk_[*event_index];
}

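// Adds this chunk's memory overhead to |overhead|. Per-event estimates are
// accumulated in |cached_overhead_estimate_| so that a full chunk only has to
// be measured once; events appended since the previous call are estimated
// incrementally.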
void TraceBufferChunk::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  if (!cached_overhead_estimate_) {
    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);

    // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
    cached_overhead_estimate_->Add("TraceBufferChunk",
                                   sizeof(*this) - sizeof(chunk_));
  }

  const size_t num_cached_estimated_events =
      cached_overhead_estimate_->GetCount("TraceEvent");
  DCHECK_LE(num_cached_estimated_events, size());

  if (IsFull() && num_cached_estimated_events == size()) {
    overhead->Update(*cached_overhead_estimate_);
    return;
  }

  for (size_t i = num_cached_estimated_events; i < size(); ++i)
    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());

  if (IsFull()) {
    cached_overhead_estimate_->AddSelf();
  } else {
    // The unused TraceEvents in |chunk_| are not cached. They will keep
    // changing as new TraceEvents are added to this chunk, so they are
    // computed on the fly.
    const size_t num_unused_trace_events = capacity() - size();
    overhead->Add("TraceEvent (unused)",
                  num_unused_trace_events * sizeof(TraceEvent));
  }

  overhead->Update(*cached_overhead_estimate_);
}

TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
  return Bind(&SimpleOutput::Append, Unretained(this));
}

void TraceResultBuffer::SimpleOutput::Append(
    const std::string& json_trace_output) {
  json_output += json_trace_output;
}

TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}

TraceResultBuffer::~TraceResultBuffer() {}

void TraceResultBuffer::SetOutputCallback(
    const OutputCallback& json_chunk_callback) {
  output_callback_ = json_chunk_callback;
}

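// The output forms a JSON array: Start() emits the opening bracket,
// AddFragment() separates successive fragments with commas, and Finish()
// emits the closing bracket.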
void TraceResultBuffer::Start() {
  append_comma_ = false;
  output_callback_.Run("[");
}

void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
  if (append_comma_)
    output_callback_.Run(",");
  append_comma_ = true;
  output_callback_.Run(trace_fragment);
}

void TraceResultBuffer::Finish() {
  output_callback_.Run("]");
}

TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
  return new TraceBufferRingBuffer(max_chunks);
}

TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
  return new TraceBufferVector(max_chunks);
}

}  // namespace trace_event
}  // namespace base