/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/trace_buffer.h"

#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/protozero/proto_utils.h"
#include "perfetto/tracing/core/shared_memory_abi.h"
#include "perfetto/tracing/core/trace_packet.h"

#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
#if TRACE_BUFFER_VERBOSE_LOGGING()
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
namespace {
std::string HexDump(const uint8_t* src, size_t size) {
  std::string buf;
  buf.reserve(4096 * 4);
  char line[64];
  char* c = line;
  for (size_t i = 0; i < size; i++) {
    c += sprintf(c, "%02x ", src[i]);
    if (i % 16 == 15) {
      buf.append("\n");
      buf.append(line);
      c = line;
    }
  }
  // Flush the last (possibly partial) line, which the loop above would
  // otherwise drop when |size| is not a multiple of 16.
  if (c != line) {
    buf.append("\n");
    buf.append(line);
  }
  return buf;
}
}  // namespace
#else
#define TRACE_BUFFER_DLOG(...) void()
#endif

namespace perfetto {

namespace {
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace

constexpr size_t TraceBuffer::ChunkRecord::kMaxSize;
constexpr size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);

// static
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
                                                 OverwritePolicy pol) {
  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
  if (!trace_buffer->Initialize(size_in_bytes))
    return nullptr;
  return trace_buffer;
}
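
// Illustrative usage sketch (assumes kDiscard is one of the OverwritePolicy
// values declared in trace_buffer.h; sizes must be a multiple of the page
// size):
//
//   std::unique_ptr<TraceBuffer> buf =
//       TraceBuffer::Create(4 * 1024 * 1024, TraceBuffer::kDiscard);
//   if (!buf) {
//     // Allocation failed; Initialize() already logged the error.
//   }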

TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;

bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      base::kPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divisor of the page size");
  PERFETTO_CHECK(size % base::kPageSize == 0);
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  stats_.set_buffer_size(size);
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}

// Note: |src| points to a shmem region that is shared with the producer.
// Assume that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
void TraceBuffer::CopyChunkUntrusted(ProducerID producer_id_trusted,
                                     uid_t producer_uid_trusted,
                                     WriterID writer_id,
                                     ChunkID chunk_id,
                                     uint16_t num_fragments,
                                     uint8_t chunk_flags,
                                     bool chunk_complete,
                                     const uint8_t* src,
                                     size_t size) {
  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
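  // For example, if sizeof(ChunkRecord) were 16 (illustrative value only), a
  // payload of |size| == 50 would give 50 + 16 = 66, which AlignUp<16>()
  // rounds up to |record_size| == 80.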
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    return;
  }

  TRACE_BUFFER_DLOG("CopyChunk @ %lu, size=%zu", wptr_ - begin(), record_size);

#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we simply disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
    }
  }
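
  // For example (illustrative values): a chunk scraped with num_fragments ==
  // 3, chunk_complete == false and kLastPacketContinuesOnNextChunk set is
  // stored as num_fragments == 2 with that flag cleared. If the producer
  // later commits the completed chunk, the re-commit path below copies it
  // again with the final fragment count.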

  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = record_meta->chunk_record;

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
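    // For example (illustrative IDs): when re-committing chunk N == 5, if
    // chunk 6 of the same {ProducerID, WriterID} sequence is already in the
    // index with num_fragments_read > 0, the re-commit of chunk 5 is rejected
    // below and counted as an ABI violation.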
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
      return;
    }

    // If this chunk was previously copied with the same number of fragments,
    // there's no need to copy it again. If the previous copy was complete
    // already, this should always be the case.
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %lu, size=%zu", wptr - begin(),
                      record_size);

    // Update the chunk metadata stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|.
    TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr - begin(),
                      uintptr_t(wptr - begin()) + record_size, record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position, write a padding
  // record to clear the end of the buffer and wrap back.
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more
  // ChunkRecord(s). In the latter case we need to first figure out where the
  // next valid ChunkRecord is (if it exists) and add padding between the new
  // record and that next valid one.
  // Example ((w) == write cursor):
  //
  // Initial state (wptr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want to write a 5th chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);
  auto it_and_inserted = index_.emplace(
      key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments, chunk_complete,
                     chunk_flags, producer_uid_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%lu - %lu] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + record_size, record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the
  // number space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id should be updated even though it's smaller (e.g. |chunk_id|
  // = 1 and |last_chunk_id| = kMaxChunkID; chunk_id - last_chunk_id = 2) and
  // the case where the new ID is an out-of-order ID right after an overflow
  // and last_chunk_id shouldn't be updated even though it's larger (e.g.
  // |chunk_id| = kMaxChunkID and |last_chunk_id| = 1; chunk_id - last_chunk_id
  // = kMaxChunkID - 1).
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}

ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and
  // we might either reach the end of the buffer or a zeroed region of the
  // buffer.
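  // For example (illustrative sizes): with two valid chunks of 32 and 64
  // bytes starting at |wptr_| and |bytes_to_clear| == 48, both chunks are
  // deleted (the second one straddles the boundary) and the function returns
  // 32 + 64 - 48 == 48, i.e. the bytes of padding the caller needs to add
  // after the new record.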
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer; it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we fill the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record
    // (padding records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG("  del index {%" PRIu32 ",%" PRIu32
                        ",%u} @ [%lu - %lu] %d",
                        key.producer_id, key.writer_id, key.chunk_id,
                        next_chunk_ptr - begin(),
                        next_chunk_ptr - begin() + next_chunk.size,
                        will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and broke the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}
void TraceBuffer::AddPaddingRecord(size_t size) {
  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
  ChunkRecord record(size);
  record.is_padding = 1;
  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%lu - %lu] %zu", wptr_ - begin(),
                    uintptr_t(wptr_ - begin()) + size, size);
  WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
  stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
  // |wptr_| is deliberately not advanced when writing a padding record.
}

bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_meta.chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_meta.chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_meta.chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");

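  // Illustrative example (made-up values): a patch typically rewrites a
  // 4-byte, zero-filled size field that was reserved inside the chunk
  // payload, e.g. offset_untrusted == 7 with data == {0x85, 0x80, 0x80,
  // 0x00}, a redundant varint encoding of length 5.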
  for (size_t i = 0; i < patches_size; i++) {
    uint8_t* ptr =
        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
                      "{%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      patches[i].offset_untrusted, patches[i].data[0],
                      patches[i].data[1], patches[i].data[2],
                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
        ptr > chunk_end - Patch::kSize) {
      // Either the IPC was so slow that in the meantime the writer managed to
      // wrap over |chunk_id|, or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }

    // DCHECK that we are writing into a zero-filled size field and not into
    // valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
    // zero-fill reservations in debug builds.
    char zero[Patch::kSize]{};
    PERFETTO_DCHECK(memcmp(ptr, &zero, Patch::kSize) == 0);

    memcpy(ptr, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG(
      "Chunk raw (after patch): %s",
      HexDump(chunk_begin, chunk_meta.chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_meta.chunk_record->flags = chunk_meta.flags;
  }
  return true;
}

void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = false;
#endif
}

TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before
  // must belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_written_. This is where the sequence will start (see
  // notes about wrapping of IDs in the header).
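  // For example (illustrative IDs): if this sequence contains chunk IDs
  // {0, 1, kMaxChunkID} and the last chunk written has ID 1, the entry with
  // ID kMaxChunkID is the oldest one, written just before the ID wrapped.
  // The upper_bound() below makes the iteration start there and wrap around,
  // yielding the read order kMaxChunkID, 0, 1.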
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}

void TraceBuffer::SequenceIterator::MoveNext() {
  // Stop iterating when we reach the end of the sequence.
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }

  // If the current chunk wasn't completed yet, we shouldn't advance past it as
  // it may be rewritten with additional packets.
  if (!cur->second.is_complete()) {
    cur = seq_end;
    return;
  }

  ChunkID last_chunk_id = cur->first.chunk_id;
  if (++cur == seq_end)
    cur = seq_begin;

  // There may be a missing chunk in the sequence of chunks, in which case the
  // next chunk's ID won't follow the last one's. If so, skip the rest of the
  // sequence. We'll return to it later once the hole is filled.
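  // For example (illustrative IDs): with chunks {7, 8, 10} in the sequence,
  // after reading chunk 8 the next entry is 10; since 8 + 1 != 10, iteration
  // stops here until chunk 9 is committed.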
  if (last_chunk_id + 1 != cur->first.chunk_id)
    cur = seq_end;
}

bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only within the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, kInvalidUid, 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are still waiting to be patched out-of-band,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const uid_t trusted_uid = chunk_meta->trusted_uid;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext()
    //    shouldn't have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching nor
    //    any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might also be the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing the |num_fragments_read| and marking the fragment as
        // read even if we didn't really.
        ReadNextPacketInChunk(chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result = ReadNextPacketInChunk(chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (bugged or malicious producer) the chunk
        // might contain an invalid fragment. In such a case we don't want to
        // stall the sequence but just skip the chunk and move on.
        // ReadNextPacketInChunk() marks the chunk as fully read, so we don't
        // attempt to read from it again in a future call to ReadBuffers(). It
        // also already records an ABI violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, trusted_uid, writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // ReadAhead() didn't find a contiguous packet sequence. We'll try
        // again on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to the beginning of the for(;;MoveNext()).
        // That will move to the next sequence because we set the read iterator
        // to its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...) [iterate over packet fragments for the current chunk].
  }  // for(;;MoveNext()) [iterate over chunks].
}

TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet, move to the
    // next sequence and try coming back here on the next
    // ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate all the stitching and move on to the next
        // chunk in the same sequence, if any.
        packet_corruption |= ReadNextPacketInChunk(&*read_iter_, packet) ==
                             ReadPacketResult::kFailedInvalidPacket;
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      // ReadNextPacketInChunk() already records an ABI violation for this
      // case.
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...) [readahead loop]
  return ReadAheadResult::kFailedMoveToNextSequence;
}

TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ChunkMeta* chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  const uint8_t* record_begin =
      reinterpret_cast<const uint8_t*>(chunk_meta->chunk_record);
  const uint8_t* record_end = record_begin + chunk_meta->chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and declared that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() +
                            chunk_meta->chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
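  // For example (illustrative bytes): a 3-byte fragment can be encoded as
  // {0x03, 0xAA, 0xBB, 0xCC} or, with a redundant varint header, as
  // {0x83, 0x80, 0x80, 0x00, 0xAA, 0xBB, 0xCC}; both yield
  // packet_size == 3.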
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() +
                            chunk_meta->chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_meta->chunk_record->size);
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}

void TraceBuffer::DiscardWrite() {
  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
  discard_writes_ = true;
  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
  TRACE_BUFFER_DLOG("  discarding write");
}

}  // namespace perfetto