/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/core/trace_buffer.h"

#include <sys/mman.h>
#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/protozero/proto_utils.h"
#include "perfetto/tracing/core/shared_memory_abi.h"
#include "perfetto/tracing/core/trace_packet.h"

#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
#if TRACE_BUFFER_VERBOSE_LOGGING()
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
namespace {
std::string HexDump(const uint8_t* src, size_t size) {
  std::string buf;
  buf.reserve(4096 * 4);
  char line[64];
  char* c = line;
  for (size_t i = 0; i < size; i++) {
    c += sprintf(c, "%02x ", src[i]);
    if (i % 16 == 15) {
      buf.append("\n");
      buf.append(line);
      c = line;
    }
  }
  return buf;
}
}  // namespace
#else
#define TRACE_BUFFER_DLOG(...) void()
#endif

namespace perfetto {

namespace {
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace.

constexpr size_t TraceBuffer::ChunkRecord::kMaxSize;
constexpr size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);

// static
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes) {
  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer());
  if (!trace_buffer->Initialize(size_in_bytes))
    return nullptr;
  return trace_buffer;
}
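
// Illustrative usage of Create() (a sketch, not code from this file; 4 MiB is
// an arbitrary example size). The returned pointer is null if the allocation
// fails, and |size_in_bytes| must be a multiple of the page size (see the
// PERFETTO_CHECK in Initialize() below):
//   std::unique_ptr<TraceBuffer> buf = TraceBuffer::Create(4 * 1024 * 1024);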

TraceBuffer::TraceBuffer() {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;

bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      base::kPageSize % sizeof(ChunkRecord) == 0,
85 "sizeof(ChunkRecord) must be an integer divider of a page size");
  PERFETTO_CHECK(size % base::kPageSize == 0);
  data_ = base::PageAllocator::AllocateMayFail(size);
  if (!data_) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_.clear();
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}

// Note: |src| points to a shmem region that is shared with the producer. Assume
// that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
void TraceBuffer::CopyChunkUntrusted(ProducerID producer_id_trusted,
                                     uid_t producer_uid_trusted,
                                     WriterID writer_id,
                                     ChunkID chunk_id,
                                     uint16_t num_fragments,
                                     uint8_t chunk_flags,
                                     const uint8_t* src,
                                     size_t size) {
  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
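  // Worked example (illustrative numbers): if sizeof(ChunkRecord) is 16 bytes
  // (it must equal sizeof(PageHeader) + sizeof(ChunkHeader), see the
  // static_assert in the constructor), a chunk payload of |size| == 58 gives
  // 58 + 16 = 74, which AlignUp<16>() rounds up to |record_size| == 80.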
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.abi_violations++;
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    return;
  }

  TRACE_BUFFER_DLOG("CopyChunk @ %lu, size=%zu", wptr_ - begin(), record_size);

#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If there isn't enough room at the given write position, write a padding
  // record to clear the end of the buffer and wrap back.
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    size_t res = DeleteNextChunksFor(cached_size_to_end);
    PERFETTO_DCHECK(res <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.write_wrap_count++;
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags;

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record and
  // that next valid record.
  // Example ((w) == write cursor):
  //
  // Initial state (wptr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want to write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  size_t padding_size = DeleteNextChunksFor(record_size);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  ChunkMeta::Key key(record);
  stats_.chunks_written++;
  stats_.bytes_written += size;
  auto it_and_inserted =
      index_.emplace(key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments,
                                    chunk_flags, producer_uid_trusted));
  if (PERFETTO_UNLIKELY(!it_and_inserted.second)) {
    // More likely a producer bug, but could also be a malicious producer.
    stats_.abi_violations++;
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    index_.erase(it_and_inserted.first);
    index_.emplace(key, ChunkMeta(GetChunkRecordAt(wptr_), num_fragments,
                                  chunk_flags, producer_uid_trusted));
  }
  TRACE_BUFFER_DLOG(" copying @ [%lu - %lu] %zu", wptr_ - begin(),
                    wptr_ - begin() + record_size, record_size);
  WriteChunkRecord(record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.write_wrap_count++;
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  last_chunk_id_[std::make_pair(producer_id_trusted, writer_id)] = chunk_id;

  if (padding_size)
    AddPaddingRecord(padding_size);
}

size_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and
  // we might either reach the end of the buffer or a zeroed region of it.
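  // Worked example (mirrors the diagram in CopyChunkUntrusted() above): with
  // |wptr_| at offset 0, existing chunks at [0,10) [10,30) [30,50) and
  // |bytes_to_clear| == 35, chunks 1-3 are evicted from the index and the
  // function returns 50 - 35 == 15, i.e. the gap that the caller then fills
  // with a padding record.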
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        " scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer, it's going to be all
    // zeroes from here to end().
    // TODO(primiano): optimization: if during Initialize() we fill the buffer
    // with padding records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record (padding
    // records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool removed = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments))
          stats_.chunks_overwritten++;
        index_.erase(it);
        removed = true;
      }
      TRACE_BUFFER_DLOG(" del index {%" PRIu32 ",%" PRIu32
                        ",%u} @ [%lu - %lu] %zu",
                        key.producer_id, key.writer_id, key.chunk_id,
                        next_chunk_ptr - begin(),
                        next_chunk_ptr - begin() + next_chunk.size, removed);
      PERFETTO_DCHECK(removed);
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and broke the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }
  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<size_t>(next_chunk_ptr - search_end);
}

void TraceBuffer::AddPaddingRecord(size_t size) {
  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
  ChunkRecord record(size);
  record.is_padding = 1;
  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%lu - %lu] %zu", wptr_ - begin(),
                    wptr_ - begin() + size, size);
  WriteChunkRecord(record, nullptr, size - sizeof(ChunkRecord));
  // |wptr_| is deliberately not advanced when writing a padding record.
}

bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    stats_.patches_failed++;
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_meta.chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_meta.chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_meta.chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");

  for (size_t i = 0; i < patches_size; i++) {
    uint8_t* ptr =
        chunk_begin + sizeof(ChunkRecord) + patches[i].offset_untrusted;
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x} cur "
                      "{%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      patches[i].offset_untrusted, patches[i].data[0],
                      patches[i].data[1], patches[i].data[2],
                      patches[i].data[3], ptr[0], ptr[1], ptr[2], ptr[3]);
    if (ptr < chunk_begin + sizeof(ChunkRecord) ||
        ptr > chunk_end - Patch::kSize) {
      // Either the IPC was so slow that in the meantime the writer managed to
      // wrap over |chunk_id|, or the producer sent a malicious IPC.
      stats_.patches_failed++;
      return false;
    }

    // DCHECK that we are writing into a zero-filled size field and not into
    // valid data. It relies on ScatteredStreamWriter::ReserveBytes() to
    // zero-fill reservations in debug builds.
    char zero[Patch::kSize]{};
    PERFETTO_DCHECK(memcmp(ptr, &zero, Patch::kSize) == 0);

    memcpy(ptr, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG(
      "Chunk raw (after patch): %s",
      HexDump(chunk_begin, chunk_meta.chunk_record->size).c_str());

  stats_.patches_succeeded += patches_size;
  if (!other_patches_pending) {
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_meta.chunk_record->flags = chunk_meta.flags;
  }
  return true;
}

void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = false;
#endif
}

TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before must
  // belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
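  // Illustrative example (made-up IDs): if the index contains the keys
  // {1,1,3} {1,1,4} {1,2,0} {2,1,1} and |seq_begin| points at {1,1,3}, the
  // lookup key below becomes {1,1,kMaxChunkID} and upper_bound() returns the
  // iterator to {1,2,0}, i.e. the first entry of the next sequence.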
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_. This is where the sequence will start (see notes about
  // wrapping in the header).
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}

void TraceBuffer::SequenceIterator::MoveNext() {
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }
  if (++cur == seq_end)
    cur = seq_begin;
}

bool TraceBuffer::ReadNextTracePacket(TracePacket* packet,
                                      uid_t* producer_uid) {
  // Note: MoveNext() moves only within the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
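  //
  // Illustrative read loop on the caller's side (a sketch, not code from this
  // file; |buf| is a hypothetical std::unique_ptr<TraceBuffer>):
  //   buf->BeginRead();
  //   for (;;) {
  //     TracePacket packet;
  //     uid_t uid = kInvalidUid;
  //     if (!buf->ReadNextTracePacket(&packet, &uid))
  //       break;
  //     // |packet| now holds slices that point into the buffer.
  //   }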
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize it below.
  *producer_uid = kInvalidUid;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are waiting to be patched out-of-band,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const uid_t trusted_uid = chunk_meta->trusted_uid;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in the case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this weren't the case, a previous call to this function
    //    wouldn't have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching nor
    //    any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might also be the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark them as read and move the
    //    cursor.
    //
    // +---------------+   +-------------------+   +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |   | ChunkID: 3    |
    // |---------------+   +-------------------+   +---------------+
    // | Packet 1      |   |                   |   | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |   | Packet 4      |
    // | Packet 3 ...  |   |                   |   | Packet 5 ...  |
    // +---------------+   +-------------------+   +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);
    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG(" chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this
        // ReadNextTracePacket() iteration. This happens by virtue of
        // ReadNextPacketInChunk() incrementing |num_fragments_read| and
        // marking the fragment as read even if we didn't really read it.
        ReadNextPacketInChunk(chunk_meta, nullptr);
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        if (PERFETTO_LIKELY(ReadNextPacketInChunk(chunk_meta, packet))) {
          *producer_uid = trusted_uid;
          return true;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk
        // might contain an invalid fragment. In such a case we don't want to
        // stall the sequence but just skip the chunk and move on.
        stats_.abi_violations++;
        PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.readaheads_succeeded++;
        *producer_uid = trusted_uid;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // Readahead didn't find a contiguous packet sequence. We'll try again
        // on the next ReadNextTracePacket() call.
        stats_.readaheads_failed++;

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to beginning of the for(;;MoveNext()). That
        // will move to the next sequence because we set the read iterator to
        // its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
    }  // while(...) [iterate over packet fragments for the current chunk].
  }  // for(;;MoveNext()) [iterate over chunks].
}

TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG(" expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet, move to the next
    // sequence and try coming back here on the next ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG(" readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG(" commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet, invalidate all the
        // stitching and move on to the next chunk in the same sequence,
        // if any.
        packet_corruption |= !ReadNextPacketInChunk(&*read_iter_, packet);
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      stats_.abi_violations++;
      PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...) [readahead loop]
  return ReadAheadResult::kFailedMoveToNextSequence;
}

bool TraceBuffer::ReadNextPacketInChunk(ChunkMeta* chunk_meta,
                                        TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  const uint8_t* record_begin =
      reinterpret_cast<const uint8_t*>(chunk_meta->chunk_record);
  const uint8_t* record_end = record_begin + chunk_meta->chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and declared that the chunk
    // contains more packets than fit within its boundaries.
    stats_.abi_violations++;
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    return false;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
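  // Illustrative example (made-up size): a 300-byte fragment is laid out as
  // the header bytes {ac 02} followed by 300 payload bytes (or {ac 82 80 00}
  // if the producer used the 4-byte redundant encoding). ParseVarInt() below
  // sets |packet_size| to 300 and returns a pointer to the first payload byte.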
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    stats_.abi_violations++;
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    return false;
  }
  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(packet_size == 0)) {
    stats_.abi_violations++;
    PERFETTO_DCHECK(suppress_sanity_dchecks_for_testing_);
    return false;
  }

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return true;
}

}  // namespace perfetto