/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/tracing/service/trace_buffer.h"

#include <limits>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/client_identity.h"
#include "perfetto/ext/tracing/core/shared_memory_abi.h"
#include "perfetto/ext/tracing/core/trace_packet.h"
#include "perfetto/protozero/proto_utils.h"

#define TRACE_BUFFER_VERBOSE_LOGGING() 0  // Set to 1 when debugging unittests.
#if TRACE_BUFFER_VERBOSE_LOGGING()
#define TRACE_BUFFER_DLOG PERFETTO_DLOG
#else
#define TRACE_BUFFER_DLOG(...) void()
#endif
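// Note: in the non-verbose configuration TRACE_BUFFER_DLOG(...) expands to the
// no-op expression void(), so its arguments are discarded at preprocessing
// time and never evaluated.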

namespace perfetto {

namespace {
constexpr uint8_t kFirstPacketContinuesFromPrevChunk =
    SharedMemoryABI::ChunkHeader::kFirstPacketContinuesFromPrevChunk;
constexpr uint8_t kLastPacketContinuesOnNextChunk =
    SharedMemoryABI::ChunkHeader::kLastPacketContinuesOnNextChunk;
constexpr uint8_t kChunkNeedsPatching =
    SharedMemoryABI::ChunkHeader::kChunkNeedsPatching;
}  // namespace.

const size_t TraceBuffer::InlineChunkHeaderSize = sizeof(ChunkRecord);

// static
std::unique_ptr<TraceBuffer> TraceBuffer::Create(size_t size_in_bytes,
                                                 OverwritePolicy pol) {
  std::unique_ptr<TraceBuffer> trace_buffer(new TraceBuffer(pol));
  if (!trace_buffer->Initialize(size_in_bytes))
    return nullptr;
  return trace_buffer;
}

TraceBuffer::TraceBuffer(OverwritePolicy pol) : overwrite_policy_(pol) {
  // See comments in ChunkRecord for the rationale of this.
  static_assert(sizeof(ChunkRecord) == sizeof(SharedMemoryABI::PageHeader) +
                                           sizeof(SharedMemoryABI::ChunkHeader),
                "ChunkRecord out of sync with the layout of SharedMemoryABI");
}

TraceBuffer::~TraceBuffer() = default;

bool TraceBuffer::Initialize(size_t size) {
  static_assert(
      SharedMemoryABI::kMinPageSize % sizeof(ChunkRecord) == 0,
      "sizeof(ChunkRecord) must be an integer divisor of the page size");
  auto max_size = std::numeric_limits<decltype(ChunkMeta::record_off)>::max();
  PERFETTO_CHECK(size <= static_cast<size_t>(max_size));
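  // kMayFail avoids crashing on allocation failure (handled just below);
  // kDontCommit reserves the address range without committing physical pages,
  // which are committed on demand instead (see the EnsureCommitted() call in
  // the cloning constructor at the bottom of this file). A large ring buffer
  // therefore only costs memory once it is actually written to.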
  data_ = base::PagedMemory::Allocate(
      size, base::PagedMemory::kMayFail | base::PagedMemory::kDontCommit);
  if (!data_.IsValid()) {
    PERFETTO_ELOG("Trace buffer allocation failed (size: %zu)", size);
    return false;
  }
  size_ = size;
  used_size_ = 0;
  stats_.set_buffer_size(size);
  max_chunk_size_ = std::min(size, ChunkRecord::kMaxSize);
  wptr_ = begin();
  index_.clear();
  last_chunk_id_written_.clear();
  read_iter_ = GetReadIterForSequence(index_.end());
  return true;
}

// Note: |src| points to a shmem region that is shared with the producer. Assume
// that the producer is malicious and will change the content of |src|
// while we execute here. Don't do any processing on it other than memcpy().
void TraceBuffer::CopyChunkUntrusted(
    ProducerID producer_id_trusted,
    const ClientIdentity& client_identity_trusted,
    WriterID writer_id,
    ChunkID chunk_id,
    uint16_t num_fragments,
    uint8_t chunk_flags,
    bool chunk_complete,
    const uint8_t* src,
    size_t size) {
  PERFETTO_CHECK(!read_only_);

  // |record_size| = |size| + sizeof(ChunkRecord), rounded up to avoid ending
  // up in a fragmented state where size_to_end() < sizeof(ChunkRecord).
  const size_t record_size =
      base::AlignUp<sizeof(ChunkRecord)>(size + sizeof(ChunkRecord));
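  // A worked example, assuming sizeof(ChunkRecord) == 16: a 30-byte chunk
  // payload yields record_size = AlignUp<16>(46) = 48. The 2 bytes of slack
  // keep the buffer walkable in whole ChunkRecord-sized steps.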
  TRACE_BUFFER_DLOG("CopyChunk @ %" PRIdPTR ", size=%zu", wptr_ - begin(),
                    record_size);
  if (PERFETTO_UNLIKELY(record_size > max_chunk_size_)) {
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    return;
  }

  has_data_ = true;
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = true;
#endif

  // If the chunk hasn't been completed, we should only consider the first
  // |num_fragments - 1| packets complete. For simplicity, we just disregard
  // the last one when we copy the chunk.
  if (PERFETTO_UNLIKELY(!chunk_complete)) {
    if (num_fragments > 0) {
      num_fragments--;
      // These flags should only affect the last packet in the chunk. We clear
      // them, so that TraceBuffer is able to look at the remaining packets in
      // this chunk.
      chunk_flags &= ~kLastPacketContinuesOnNextChunk;
      chunk_flags &= ~kChunkNeedsPatching;
    }
  }

  ChunkRecord record(record_size);
  record.producer_id = producer_id_trusted;
  record.chunk_id = chunk_id;
  record.writer_id = writer_id;
  record.num_fragments = num_fragments;
  record.flags = chunk_flags & ChunkRecord::kFlagsBitMask;
  ChunkMeta::Key key(record);

  // Check whether we have already copied the same chunk previously. This may
  // happen if the service scrapes chunks in a potentially incomplete state
  // before receiving commit requests for them from the producer. Note that the
  // service may scrape and thus override chunks in arbitrary order since the
  // chunks aren't ordered in the SMB.
  const auto it = index_.find(key);
  if (PERFETTO_UNLIKELY(it != index_.end())) {
    ChunkMeta* record_meta = &it->second;
    ChunkRecord* prev = GetChunkRecordAt(begin() + record_meta->record_off);

    // Verify that the old chunk's metadata corresponds to the new one.
    // Overridden chunks should never change size, since the page layout is
    // fixed per writer. The number of fragments should also never decrease and
    // flags should not be removed.
    if (PERFETTO_UNLIKELY(ChunkMeta::Key(*prev) != key ||
                          prev->size != record_size ||
                          prev->num_fragments > num_fragments ||
                          (prev->flags & chunk_flags) != prev->flags)) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // If the number of fragments didn't change since the chunk was first
    // copied, there's no need to copy it again. If the previous copy was
    // already complete, this should always be the case.
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_ ||
                    !record_meta->is_complete() ||
                    (chunk_complete && prev->num_fragments == num_fragments));
    if (prev->num_fragments == num_fragments) {
      TRACE_BUFFER_DLOG("  skipping recommit of identical chunk");
      return;
    }

    // If we've already started reading from chunk N+1 following this chunk N,
    // don't override chunk N. Otherwise we may end up reading a packet from
    // chunk N after having read from chunk N+1, thereby violating sequential
    // read of packets. This shouldn't happen if the producer is well-behaved,
    // because it shouldn't start chunk N+1 before completing chunk N.
    ChunkMeta::Key subsequent_key = key;
    static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                  "ChunkID wraps");
    subsequent_key.chunk_id++;
    const auto subsequent_it = index_.find(subsequent_key);
    if (subsequent_it != index_.end() &&
        subsequent_it->second.num_fragments_read > 0) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    // We should not have read past the last packet.
    if (record_meta->num_fragments_read > prev->num_fragments) {
      PERFETTO_ELOG(
          "TraceBuffer read too many fragments from an incomplete chunk");
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
      return;
    }

    uint8_t* wptr = reinterpret_cast<uint8_t*>(prev);
    TRACE_BUFFER_DLOG("  overriding chunk @ %" PRIdPTR ", size=%zu",
                      wptr - begin(), record_size);

    // Update chunk meta data stored in the index, as it may have changed.
    record_meta->num_fragments = num_fragments;
    record_meta->flags = chunk_flags;
    record_meta->set_complete(chunk_complete);

    // Override the ChunkRecord contents at the original |wptr|.
    TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                      wptr - begin(), uintptr_t(wptr - begin()) + record_size,
                      record_size);
    WriteChunkRecord(wptr, record, src, size);
    TRACE_BUFFER_DLOG("Chunk raw: %s",
                      base::HexDump(wptr, record_size).c_str());
    stats_.set_chunks_rewritten(stats_.chunks_rewritten() + 1);
    return;
  }

  if (PERFETTO_UNLIKELY(discard_writes_))
    return DiscardWrite();

  // If there isn't enough room from the given write position, write a padding
  // record to clear the end of the buffer and wrap back.
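  // For example, still assuming sizeof(ChunkRecord) == 16: with a 4096-byte
  // buffer and |wptr_| 32 bytes from the end, a 48-byte record doesn't fit.
  // The last 32 bytes are cleared and covered by a padding record, and the
  // new record is written at offset 0 instead.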
  const size_t cached_size_to_end = size_to_end();
  if (PERFETTO_UNLIKELY(record_size > cached_size_to_end)) {
    ssize_t res = DeleteNextChunksFor(cached_size_to_end);
    if (res == -1)
      return DiscardWrite();
    PERFETTO_DCHECK(static_cast<size_t>(res) <= cached_size_to_end);
    AddPaddingRecord(cached_size_to_end);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
    PERFETTO_DCHECK(size_to_end() >= record_size);
  }

  // At this point either |wptr_| points to an untouched part of the buffer
  // (i.e. *wptr_ == 0) or we are about to overwrite one or more ChunkRecord(s).
  // In the latter case we need to first figure out where the next valid
  // ChunkRecord is (if it exists) and add padding between the new record and
  // that one. Example ((w) == write cursor):
  //
  // Initial state (wptr_ == 0):
  // |0 (w)    |10               |30                  |50
  // +---------+-----------------+--------------------+--------------------+
  // | Chunk 1 | Chunk 2         | Chunk 3            | Chunk 4            |
  // +---------+-----------------+--------------------+--------------------+
  //
  // Let's assume we now want to write a 5th Chunk of size == 35. The final
  // state should look like this:
  // |0                                |35 (w)         |50
  // +---------------------------------+---------------+--------------------+
  // | Chunk 5                         | Padding Chunk | Chunk 4            |
  // +---------------------------------+---------------+--------------------+

  // Deletes all chunks from |wptr_| to |wptr_| + |record_size|.
  ssize_t del_res = DeleteNextChunksFor(record_size);
  if (del_res == -1)
    return DiscardWrite();
  size_t padding_size = static_cast<size_t>(del_res);

  // Now first insert the new chunk. At the end, if necessary, add the padding.
  stats_.set_chunks_written(stats_.chunks_written() + 1);
  stats_.set_bytes_written(stats_.bytes_written() + record_size);

  uint32_t chunk_off = GetOffset(GetChunkRecordAt(wptr_));
  auto it_and_inserted =
      index_.emplace(key, ChunkMeta(chunk_off, num_fragments, chunk_complete,
                                    chunk_flags, client_identity_trusted));
  PERFETTO_DCHECK(it_and_inserted.second);
  TRACE_BUFFER_DLOG("  copying @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                    wptr_ - begin(), uintptr_t(wptr_ - begin()) + record_size,
                    record_size);
  WriteChunkRecord(wptr_, record, src, size);
  TRACE_BUFFER_DLOG("Chunk raw: %s", base::HexDump(wptr_, record_size).c_str());
  wptr_ += record_size;
  if (wptr_ >= end()) {
    PERFETTO_DCHECK(padding_size == 0);
    wptr_ = begin();
    stats_.set_write_wrap_count(stats_.write_wrap_count() + 1);
  }
  DcheckIsAlignedAndWithinBounds(wptr_);

  // Chunks may be received out of order, so only update last_chunk_id if the
  // new chunk_id is larger. But take into account overflows by only selecting
  // the new ID if its distance to the latest ID is smaller than half the
  // number space.
  //
  // This accounts for both the case where the new ID has just overflown and
  // last_chunk_id should be updated even though it's smaller (e.g. |chunk_id|
  // = 1 and |last_chunk_id| = kMaxChunkID; chunk_id - last_chunk_id = 2) and
  // the case where the new ID is an out-of-order ID right after an overflow
  // and last_chunk_id shouldn't be updated even though it's larger (e.g.
  // |chunk_id| = kMaxChunkID and |last_chunk_id| = 1; chunk_id - last_chunk_id
  // = kMaxChunkID - 1).
  auto producer_and_writer_id = std::make_pair(producer_id_trusted, writer_id);
  ChunkID& last_chunk_id = last_chunk_id_written_[producer_and_writer_id];
  static_assert(std::numeric_limits<ChunkID>::max() == kMaxChunkID,
                "This code assumes that ChunkID wraps at kMaxChunkID");
  if (chunk_id - last_chunk_id < kMaxChunkID / 2) {
    last_chunk_id = chunk_id;
  } else {
    stats_.set_chunks_committed_out_of_order(
        stats_.chunks_committed_out_of_order() + 1);
  }

  if (padding_size)
    AddPaddingRecord(padding_size);
}

ssize_t TraceBuffer::DeleteNextChunksFor(size_t bytes_to_clear) {
  PERFETTO_CHECK(!discard_writes_);

  // Find the position of the first chunk which begins at or after
  // (|wptr_| + |bytes_to_clear|). Note that such a chunk might not exist and
  // we might either reach the end of the buffer or a zeroed region of the
  // buffer.
  uint8_t* next_chunk_ptr = wptr_;
  uint8_t* search_end = wptr_ + bytes_to_clear;
  TRACE_BUFFER_DLOG("Delete [%zu %zu]", wptr_ - begin(), search_end - begin());
  DcheckIsAlignedAndWithinBounds(wptr_);
  PERFETTO_DCHECK(search_end <= end());
  std::vector<ChunkMap::iterator> index_delete;
  uint64_t chunks_overwritten = stats_.chunks_overwritten();
  uint64_t bytes_overwritten = stats_.bytes_overwritten();
  uint64_t padding_bytes_cleared = stats_.padding_bytes_cleared();
  while (next_chunk_ptr < search_end) {
    const ChunkRecord& next_chunk = *GetChunkRecordAt(next_chunk_ptr);
    TRACE_BUFFER_DLOG(
        "  scanning chunk [%zu %zu] (valid=%d)", next_chunk_ptr - begin(),
        next_chunk_ptr - begin() + next_chunk.size, next_chunk.is_valid());

    // We just reached the untouched part of the buffer; it's going to be all
    // zeroes from here to end().
    // Optimization: if during Initialize() we filled the buffer with padding
    // records we could get rid of this branch.
    if (PERFETTO_UNLIKELY(!next_chunk.is_valid())) {
      // This should happen only at the first iteration. The zeroed area can
      // only begin precisely at the |wptr_|, not after. Otherwise it means that
      // we wrapped but screwed up the ChunkRecord chain.
      PERFETTO_DCHECK(next_chunk_ptr == wptr_);
      return 0;
    }

    // Remove |next_chunk| from the index, unless it's a padding record
    // (padding records are not part of the index).
    if (PERFETTO_LIKELY(!next_chunk.is_padding)) {
      ChunkMeta::Key key(next_chunk);
      auto it = index_.find(key);
      bool will_remove = false;
      if (PERFETTO_LIKELY(it != index_.end())) {
        const ChunkMeta& meta = it->second;
        if (PERFETTO_UNLIKELY(meta.num_fragments_read < meta.num_fragments)) {
          if (overwrite_policy_ == kDiscard)
            return -1;
          chunks_overwritten++;
          bytes_overwritten += next_chunk.size;
        }
        index_delete.push_back(it);
        will_remove = true;
      }
      TRACE_BUFFER_DLOG("  del index {%" PRIu32 ",%" PRIu32 ",%u} @ [%" PRIdPTR
                        " - %" PRIdPTR "] %d",
                        key.producer_id, key.writer_id, key.chunk_id,
                        next_chunk_ptr - begin(),
                        next_chunk_ptr - begin() + next_chunk.size,
                        will_remove);
      PERFETTO_DCHECK(will_remove);
    } else {
      padding_bytes_cleared += next_chunk.size;
    }

    next_chunk_ptr += next_chunk.size;

    // We should never hit this, unless we managed to screw up while writing
    // to the buffer and broke the ChunkRecord(s) chain.
    // TODO(primiano): Write more meaningful logging with the status of the
    // buffer, to get more actionable bugs in case we hit this.
    PERFETTO_CHECK(next_chunk_ptr <= end());
  }

  // Remove from the index.
  for (auto it : index_delete) {
    index_.erase(it);
  }
  stats_.set_chunks_overwritten(chunks_overwritten);
  stats_.set_bytes_overwritten(bytes_overwritten);
  stats_.set_padding_bytes_cleared(padding_bytes_cleared);

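  // The returned slack is the tail of the last deleted chunk that extends
  // past |search_end|; the caller is responsible for covering it with a
  // padding record (see the Chunk 5 / Padding Chunk example in
  // CopyChunkUntrusted() above).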
  PERFETTO_DCHECK(next_chunk_ptr >= search_end && next_chunk_ptr <= end());
  return static_cast<ssize_t>(next_chunk_ptr - search_end);
}

void TraceBuffer::AddPaddingRecord(size_t size) {
  PERFETTO_DCHECK(size >= sizeof(ChunkRecord) && size <= ChunkRecord::kMaxSize);
  ChunkRecord record(size);
  record.is_padding = 1;
  TRACE_BUFFER_DLOG("AddPaddingRecord @ [%" PRIdPTR " - %" PRIdPTR "] %zu",
                    wptr_ - begin(), uintptr_t(wptr_ - begin()) + size, size);
  WriteChunkRecord(wptr_, record, nullptr, size - sizeof(ChunkRecord));
  stats_.set_padding_bytes_written(stats_.padding_bytes_written() + size);
  // |wptr_| is deliberately not advanced when writing a padding record.
}

bool TraceBuffer::TryPatchChunkContents(ProducerID producer_id,
                                        WriterID writer_id,
                                        ChunkID chunk_id,
                                        const Patch* patches,
                                        size_t patches_size,
                                        bool other_patches_pending) {
  PERFETTO_CHECK(!read_only_);
  ChunkMeta::Key key(producer_id, writer_id, chunk_id);
  auto it = index_.find(key);
  if (it == index_.end()) {
    stats_.set_patches_failed(stats_.patches_failed() + 1);
    return false;
  }
  ChunkMeta& chunk_meta = it->second;

  // Check that the index is consistent with the actual ProducerID/WriterID
  // stored in the ChunkRecord.
  ChunkRecord* chunk_record = GetChunkRecordAt(begin() + chunk_meta.record_off);
  PERFETTO_DCHECK(ChunkMeta::Key(*chunk_record) == key);
  uint8_t* chunk_begin = reinterpret_cast<uint8_t*>(chunk_record);
  PERFETTO_DCHECK(chunk_begin >= begin());
  uint8_t* chunk_end = chunk_begin + chunk_record->size;
  PERFETTO_DCHECK(chunk_end <= end());
  uint8_t* payload_begin = chunk_begin + sizeof(ChunkRecord);
  const size_t payload_size = static_cast<size_t>(chunk_end - payload_begin);

  static_assert(Patch::kSize == SharedMemoryABI::kPacketHeaderSize,
                "Patch::kSize out of sync with SharedMemoryABI");
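  // Each patch rewrites Patch::kSize (4) bytes of the already-copied payload,
  // typically backfilling the size field of a protobuf message that was still
  // being written when the chunk was copied into this buffer.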

  for (size_t i = 0; i < patches_size; i++) {
    const size_t offset_untrusted = patches[i].offset_untrusted;
    if (payload_size < Patch::kSize ||
        offset_untrusted > payload_size - Patch::kSize) {
      // Either the IPC was so slow that in the meantime the writer managed to
      // wrap over |chunk_id|, or the producer sent a malicious IPC.
      stats_.set_patches_failed(stats_.patches_failed() + 1);
      return false;
    }
    TRACE_BUFFER_DLOG("PatchChunk {%" PRIu32 ",%" PRIu32
                      ",%u} size=%zu @ %zu with {%02x %02x %02x %02x}",
                      producer_id, writer_id, chunk_id, chunk_end - chunk_begin,
                      offset_untrusted, patches[i].data[0], patches[i].data[1],
                      patches[i].data[2], patches[i].data[3]);
    uint8_t* dst = payload_begin + offset_untrusted;
    memcpy(dst, &patches[i].data[0], Patch::kSize);
  }
  TRACE_BUFFER_DLOG("Chunk raw (after patch): %s",
                    base::HexDump(chunk_begin, chunk_record->size).c_str());

  stats_.set_patches_succeeded(stats_.patches_succeeded() + patches_size);
  if (!other_patches_pending) {
    chunk_meta.flags &= ~kChunkNeedsPatching;
    chunk_record->flags = chunk_meta.flags & ChunkRecord::kFlagsBitMask;
  }
  return true;
}

void TraceBuffer::BeginRead() {
  read_iter_ = GetReadIterForSequence(index_.begin());
#if PERFETTO_DCHECK_IS_ON()
  changed_since_last_read_ = false;
#endif
}

TraceBuffer::SequenceIterator TraceBuffer::GetReadIterForSequence(
    ChunkMap::iterator seq_begin) {
  SequenceIterator iter;
  iter.seq_begin = seq_begin;
  if (seq_begin == index_.end()) {
    iter.cur = iter.seq_end = index_.end();
    return iter;
  }

#if PERFETTO_DCHECK_IS_ON()
  // Either |seq_begin| is == index_.begin() or the item immediately before
  // must belong to a different {ProducerID, WriterID} sequence.
  if (seq_begin != index_.begin() && seq_begin != index_.end()) {
    auto prev_it = seq_begin;
    prev_it--;
    PERFETTO_DCHECK(
        seq_begin == index_.begin() ||
        std::tie(prev_it->first.producer_id, prev_it->first.writer_id) <
            std::tie(seq_begin->first.producer_id, seq_begin->first.writer_id));
  }
#endif

  // Find the first entry that has a greater {ProducerID, WriterID} (or just
  // index_.end() if we reached the end).
  ChunkMeta::Key key = seq_begin->first;  // Deliberate copy.
  key.chunk_id = kMaxChunkID;
  iter.seq_end = index_.upper_bound(key);
  PERFETTO_DCHECK(iter.seq_begin != iter.seq_end);

  // Now find the first entry between [seq_begin, seq_end) that is
  // > last_chunk_id_written_. This is where the sequence will start (see
  // notes about wrapping of IDs in the header).
  auto producer_and_writer_id = std::make_pair(key.producer_id, key.writer_id);
  PERFETTO_DCHECK(last_chunk_id_written_.count(producer_and_writer_id));
  iter.wrapping_id = last_chunk_id_written_[producer_and_writer_id];
  key.chunk_id = iter.wrapping_id;
  iter.cur = index_.upper_bound(key);
  if (iter.cur == iter.seq_end)
    iter.cur = iter.seq_begin;
  return iter;
}

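// Example of the resulting visit order (assuming all chunks are complete and
// contiguous): if the index holds chunk IDs {kMaxChunkID - 1, kMaxChunkID, 0,
// 1} for a sequence and the last ID written is 1, iteration starts at
// kMaxChunkID - 1 (the oldest chunk), proceeds through kMaxChunkID and 0, and
// stops after visiting 1, the wrapping ID.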
void TraceBuffer::SequenceIterator::MoveNext() {
  // Stop iterating when we reach the end of the sequence.
  // Note: |seq_begin| might be == |seq_end|.
  if (cur == seq_end || cur->first.chunk_id == wrapping_id) {
    cur = seq_end;
    return;
  }

  // If the current chunk wasn't completed yet, we shouldn't advance past it as
  // it may be rewritten with additional packets.
  if (!cur->second.is_complete()) {
    cur = seq_end;
    return;
  }

  ChunkID last_chunk_id = cur->first.chunk_id;
  if (++cur == seq_end)
    cur = seq_begin;

  // There may be a missing chunk in the sequence of chunks, in which case the
  // next chunk's ID won't follow the last one's. If so, skip the rest of the
  // sequence. We'll return to it later once the hole is filled.
  if (last_chunk_id + 1 != cur->first.chunk_id)
    cur = seq_end;
}

bool TraceBuffer::ReadNextTracePacket(
    TracePacket* packet,
    PacketSequenceProperties* sequence_properties,
    bool* previous_packet_on_sequence_dropped) {
  // Note: MoveNext() moves only to the next chunk within the same
  // {ProducerID, WriterID} sequence. Here we want to:
  // - return the next patched+complete packet in the current sequence, if any.
  // - return the first patched+complete packet in the next sequence, if any.
  // - return false if none of the above is found.
  TRACE_BUFFER_DLOG("ReadNextTracePacket()");

  // Just in case we forget to initialize these below.
  *sequence_properties = {0, ClientIdentity(), 0};
  *previous_packet_on_sequence_dropped = false;

  // At the start of each sequence iteration, we consider the last read packet
  // dropped. While iterating over the chunks in the sequence, we update this
  // flag based on our knowledge about the last packet that was read from each
  // chunk (|last_read_packet_skipped| in ChunkMeta).
  bool previous_packet_dropped = true;

#if PERFETTO_DCHECK_IS_ON()
  PERFETTO_DCHECK(!changed_since_last_read_);
#endif
  for (;; read_iter_.MoveNext()) {
    if (PERFETTO_UNLIKELY(!read_iter_.is_valid())) {
      // We ran out of chunks in the current {ProducerID, WriterID} sequence or
      // we just reached the index_.end().

      if (PERFETTO_UNLIKELY(read_iter_.seq_end == index_.end()))
        return false;

      // We reached the end of sequence, move to the next one.
      // Note: ++read_iter_.seq_end might become index_.end(), but
      // GetReadIterForSequence() knows how to deal with that.
      read_iter_ = GetReadIterForSequence(read_iter_.seq_end);
      PERFETTO_DCHECK(read_iter_.is_valid() && read_iter_.cur != index_.end());
      previous_packet_dropped = true;
    }

    ChunkMeta* chunk_meta = &*read_iter_;

    // If the chunk has holes that are awaiting out-of-band patching,
    // skip the current sequence and move to the next one.
    if (chunk_meta->flags & kChunkNeedsPatching) {
      read_iter_.MoveToEnd();
      continue;
    }

    const ProducerID trusted_producer_id = read_iter_.producer_id();
    const WriterID writer_id = read_iter_.writer_id();
    const ProducerAndWriterID producer_and_writer_id =
        MkProducerAndWriterID(trusted_producer_id, writer_id);
    const ClientIdentity& client_identity = chunk_meta->client_identity_trusted;

    // At this point we have a chunk in |chunk_meta| that has not been fully
    // read. We don't know yet whether we have enough data to read the full
    // packet (in case it's fragmented over several chunks) and we are about
    // to find that out. Specifically:
    // A) If the first fragment is unread and is a fragment continuing from a
    //    previous chunk, it means we have missed the previous ChunkID. In
    //    fact, if this wasn't the case, a previous call to ReadNext() wouldn't
    //    have moved the cursor to this chunk.
    // B) Any fragment > 0 && < last is always readable. By definition an inner
    //    packet is never fragmented and hence requires neither stitching nor
    //    any out-of-band patching. The same applies to the last packet
    //    iff it doesn't continue on the next chunk.
    // C) If the last packet (which might also be the only packet in the chunk)
    //    is a fragment and continues on the next chunk, we peek at the next
    //    chunks and, if we have all of them, mark as read and move the cursor.
    //
    // +---------------+   +-------------------+  +---------------+
    // | ChunkID: 1    |   | ChunkID: 2        |  | ChunkID: 3    |
    // |---------------+   +-------------------+  +---------------+
    // | Packet 1      |   |                   |  | ... Packet 3  |
    // | Packet 2      |   | ... Packet 3  ... |  | Packet 4      |
    // | Packet 3  ... |   |                   |  | Packet 5 ...  |
    // +---------------+   +-------------------+  +---------------+

    PERFETTO_DCHECK(chunk_meta->num_fragments_read <=
                    chunk_meta->num_fragments);

    // If we didn't read any packets from this chunk, the last packet was from
    // the previous chunk we iterated over; so don't update
    // |previous_packet_dropped| in this case.
    if (chunk_meta->num_fragments_read > 0)
      previous_packet_dropped = chunk_meta->last_read_packet_skipped();

    while (chunk_meta->num_fragments_read < chunk_meta->num_fragments) {
      enum { kSkip = 0, kReadOnePacket, kTryReadAhead } action;
      if (chunk_meta->num_fragments_read == 0) {
        if (chunk_meta->flags & kFirstPacketContinuesFromPrevChunk) {
          action = kSkip;  // Case A.
        } else if (chunk_meta->num_fragments == 1 &&
                   (chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
          action = kTryReadAhead;  // Case C.
        } else {
          action = kReadOnePacket;  // Case B.
        }
      } else if (chunk_meta->num_fragments_read <
                     chunk_meta->num_fragments - 1 ||
                 !(chunk_meta->flags & kLastPacketContinuesOnNextChunk)) {
        action = kReadOnePacket;  // Case B.
      } else {
        action = kTryReadAhead;  // Case C.
      }

      TRACE_BUFFER_DLOG("  chunk %u, packet %hu of %hu, action=%d",
                        read_iter_.chunk_id(), chunk_meta->num_fragments_read,
                        chunk_meta->num_fragments, action);

      if (action == kSkip) {
        // This fragment will be skipped forever, not just in this ReadPacket()
        // iteration. This happens by virtue of ReadNextPacketInChunk()
        // incrementing |num_fragments_read| and marking the fragment as read
        // even though we didn't actually read it.
        ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, nullptr);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        continue;
      }

      if (action == kReadOnePacket) {
        // The easy peasy case B.
        ReadPacketResult result =
            ReadNextPacketInChunk(producer_and_writer_id, chunk_meta, packet);

        if (PERFETTO_LIKELY(result == ReadPacketResult::kSucceeded)) {
          *sequence_properties = {trusted_producer_id, client_identity,
                                  writer_id};
          *previous_packet_on_sequence_dropped = previous_packet_dropped;
          return true;
        } else if (result == ReadPacketResult::kFailedEmptyPacket) {
          // We can ignore and skip empty packets.
          PERFETTO_DCHECK(packet->slices().empty());
          continue;
        }

        // In extremely rare cases (producer bugged / malicious) the chunk
        // might contain an invalid fragment. In such case we don't want to
        // stall the sequence but just skip the chunk and move on.
        // ReadNextPacketInChunk() marks the chunk as fully read, so we don't
        // attempt to read from it again in a future call to ReadBuffers(). It
        // also already records an abi violation for this.
        PERFETTO_DCHECK(result == ReadPacketResult::kFailedInvalidPacket);
        chunk_meta->set_last_read_packet_skipped(true);
        previous_packet_dropped = true;
        break;
      }

      PERFETTO_DCHECK(action == kTryReadAhead);
      ReadAheadResult ra_res = ReadAhead(packet);
      if (ra_res == ReadAheadResult::kSucceededReturnSlices) {
        stats_.set_readaheads_succeeded(stats_.readaheads_succeeded() + 1);
        *sequence_properties = {trusted_producer_id, client_identity,
                                writer_id};
        *previous_packet_on_sequence_dropped = previous_packet_dropped;
        return true;
      }

      if (ra_res == ReadAheadResult::kFailedMoveToNextSequence) {
        // ReadAhead() didn't find a contiguous packet sequence. We'll try
        // again on the next ReadPacket() call.
        stats_.set_readaheads_failed(stats_.readaheads_failed() + 1);

        // TODO(primiano): optimization: this MoveToEnd() is the reason why
        // MoveNext() (that is called in the outer for(;;MoveNext)) needs to
        // deal gracefully with the case of |cur|==|seq_end|. Maybe we can do
        // something to avoid that check by reshuffling the code here?
        read_iter_.MoveToEnd();

        // This break will go back to beginning of the for(;;MoveNext()). That
        // will move to the next sequence because we set the read iterator to
        // its end.
        break;
      }

      PERFETTO_DCHECK(ra_res == ReadAheadResult::kFailedStayOnSameSequence);

      // In this case ReadAhead() might advance |read_iter_|, so we need to
      // re-cache the |chunk_meta| pointer to point to the current chunk.
      chunk_meta = &*read_iter_;
      chunk_meta->set_last_read_packet_skipped(true);
      previous_packet_dropped = true;
    }  // while(...)  [iterate over packet fragments for the current chunk].
  }    // for(;;MoveNext()) [iterate over chunks].
}

TraceBuffer::ReadAheadResult TraceBuffer::ReadAhead(TracePacket* packet) {
  static_assert(static_cast<ChunkID>(kMaxChunkID + 1) == 0,
                "relying on kMaxChunkID to wrap naturally");
  TRACE_BUFFER_DLOG(" readahead start @ chunk %u", read_iter_.chunk_id());
  ChunkID next_chunk_id = read_iter_.chunk_id() + 1;
  SequenceIterator it = read_iter_;
  for (it.MoveNext(); it.is_valid(); it.MoveNext(), next_chunk_id++) {
    // We should stay within the same sequence while iterating here.
    PERFETTO_DCHECK(it.producer_id() == read_iter_.producer_id() &&
                    it.writer_id() == read_iter_.writer_id());

    TRACE_BUFFER_DLOG("   expected chunk ID: %u, actual ID: %u", next_chunk_id,
                      it.chunk_id());

    if (PERFETTO_UNLIKELY((*it).num_fragments == 0))
      continue;

    // If we miss the next chunk, stop looking in the current sequence and
    // try another sequence. This chunk might come in the near future.
    // The second condition is the edge case of a buggy/malicious
    // producer. The ChunkID is contiguous but its flags don't make sense.
    if (it.chunk_id() != next_chunk_id ||
        PERFETTO_UNLIKELY(
            !((*it).flags & kFirstPacketContinuesFromPrevChunk))) {
      return ReadAheadResult::kFailedMoveToNextSequence;
    }

    // If the chunk is contiguous but has not been patched yet, move to the
    // next sequence and try coming back here on the next
    // ReadNextTracePacket() call.
    // TODO(primiano): add a test to cover this, it's a subtle case.
    if ((*it).flags & kChunkNeedsPatching)
      return ReadAheadResult::kFailedMoveToNextSequence;

    // This is the case of an intermediate chunk which contains only one
    // fragment which continues on the next chunk. This is the case for large
    // packets, e.g.: [Packet0, Packet1(0)] [Packet1(1)] [Packet1(2), ...]
    // (Packet1(X) := fragment X of Packet1).
    if ((*it).num_fragments == 1 &&
        ((*it).flags & kLastPacketContinuesOnNextChunk)) {
      continue;
    }

    // We made it! We got all fragments for the packet without holes.
    TRACE_BUFFER_DLOG("  readahead success @ chunk %u", it.chunk_id());
    PERFETTO_DCHECK(((*it).num_fragments == 1 &&
                     !((*it).flags & kLastPacketContinuesOnNextChunk)) ||
                    (*it).num_fragments > 1);

    // Now let's re-iterate over the [read_iter_, it] sequence and mark
    // all the fragments as read.
    bool packet_corruption = false;
    for (;;) {
      PERFETTO_DCHECK(read_iter_.is_valid());
      TRACE_BUFFER_DLOG("    commit chunk %u", read_iter_.chunk_id());
      if (PERFETTO_LIKELY((*read_iter_).num_fragments > 0)) {
        // In the unlikely case of a corrupted packet (corrupted or empty
        // fragment), invalidate all the stitching and move on to the next
        // chunk in the same sequence, if any.
        auto pw_id = MkProducerAndWriterID(it.producer_id(), it.writer_id());
        packet_corruption |=
            ReadNextPacketInChunk(pw_id, &*read_iter_, packet) ==
            ReadPacketResult::kFailedInvalidPacket;
      }
      if (read_iter_.cur == it.cur)
        break;
      read_iter_.MoveNext();
    }  // for(;;)
    PERFETTO_DCHECK(read_iter_.cur == it.cur);

    if (PERFETTO_UNLIKELY(packet_corruption)) {
      // ReadNextPacketInChunk() already records an abi violation for this
      // case.
      *packet = TracePacket();  // clear.
      return ReadAheadResult::kFailedStayOnSameSequence;
    }

    return ReadAheadResult::kSucceededReturnSlices;
  }  // for(it...)  [readahead loop]
  return ReadAheadResult::kFailedMoveToNextSequence;
}

TraceBuffer::ReadPacketResult TraceBuffer::ReadNextPacketInChunk(
    ProducerAndWriterID producer_and_writer_id,
    ChunkMeta* const chunk_meta,
    TracePacket* packet) {
  PERFETTO_DCHECK(chunk_meta->num_fragments_read < chunk_meta->num_fragments);
  PERFETTO_DCHECK(!(chunk_meta->flags & kChunkNeedsPatching));

  const uint8_t* record_begin = begin() + chunk_meta->record_off;
  DcheckIsAlignedAndWithinBounds(record_begin);
  auto* chunk_record = reinterpret_cast<const ChunkRecord*>(record_begin);
  const uint8_t* record_end = record_begin + chunk_record->size;
  const uint8_t* packets_begin = record_begin + sizeof(ChunkRecord);
  const uint8_t* packet_begin = packets_begin + chunk_meta->cur_fragment_offset;

  if (PERFETTO_UNLIKELY(packet_begin < packets_begin ||
                        packet_begin >= record_end)) {
    // The producer has a bug or is malicious and declared that the chunk
    // contains more packets beyond its boundaries.
    stats_.set_abi_violations(stats_.abi_violations() + 1);
    PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  // A packet (or a fragment) starts with a varint stating its size, followed
  // by its content. The varint shouldn't be larger than 4 bytes (just in case
  // the producer is using a redundant encoding).
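  // For example, a 300-byte fragment is preceded by the two header bytes
  // {0xAC, 0x02}: 300 = 0b1'0010'1100, emitted as little-endian groups of 7
  // bits, with the MSB of each byte acting as a continuation bit.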
  uint64_t packet_size = 0;
  const uint8_t* header_end =
      std::min(packet_begin + protozero::proto_utils::kMessageLengthFieldSize,
               record_end);
  const uint8_t* packet_data = protozero::proto_utils::ParseVarInt(
      packet_begin, header_end, &packet_size);

  const uint8_t* next_packet = packet_data + packet_size;
  if (PERFETTO_UNLIKELY(next_packet <= packet_begin ||
                        next_packet > record_end)) {
    // In BufferExhaustedPolicy::kDrop mode, TraceWriter may abort a fragmented
    // packet by writing an invalid size in the last fragment's header. We
    // should handle this case without recording an ABI violation (since
    // Android R).
    if (packet_size != SharedMemoryABI::kPacketSizeDropPacket) {
      stats_.set_abi_violations(stats_.abi_violations() + 1);
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    } else {
      stats_.set_trace_writer_packet_loss(stats_.trace_writer_packet_loss() +
                                          1);
    }
    chunk_meta->cur_fragment_offset = 0;
    chunk_meta->num_fragments_read = chunk_meta->num_fragments;
    if (PERFETTO_LIKELY(chunk_meta->is_complete())) {
      stats_.set_chunks_read(stats_.chunks_read() + 1);
      stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    }
    return ReadPacketResult::kFailedInvalidPacket;
  }

  chunk_meta->cur_fragment_offset =
      static_cast<uint16_t>(next_packet - packets_begin);
  chunk_meta->num_fragments_read++;

  if (PERFETTO_UNLIKELY(chunk_meta->num_fragments_read ==
                            chunk_meta->num_fragments &&
                        chunk_meta->is_complete())) {
    stats_.set_chunks_read(stats_.chunks_read() + 1);
    stats_.set_bytes_read(stats_.bytes_read() + chunk_record->size);
    auto* writer_stats = writer_stats_.Insert(producer_and_writer_id, {}).first;
    writer_stats->used_chunk_hist.Add(chunk_meta->cur_fragment_offset);
  } else {
    // We have at least one more packet to parse. It should be within the
    // chunk.
    if (chunk_meta->cur_fragment_offset + sizeof(ChunkRecord) >=
        chunk_record->size) {
      PERFETTO_DCHECK(suppress_client_dchecks_for_testing_);
    }
  }

  chunk_meta->set_last_read_packet_skipped(false);

  if (PERFETTO_UNLIKELY(packet_size == 0))
    return ReadPacketResult::kFailedEmptyPacket;

  if (PERFETTO_LIKELY(packet))
    packet->AddSlice(packet_data, static_cast<size_t>(packet_size));

  return ReadPacketResult::kSucceeded;
}

void TraceBuffer::DiscardWrite() {
  PERFETTO_DCHECK(overwrite_policy_ == kDiscard);
  discard_writes_ = true;
  stats_.set_chunks_discarded(stats_.chunks_discarded() + 1);
  TRACE_BUFFER_DLOG("  discarding write");
}

std::unique_ptr<TraceBuffer> TraceBuffer::CloneReadOnly() const {
  std::unique_ptr<TraceBuffer> buf(new TraceBuffer(CloneCtor(), *this));
  if (!buf->data_.IsValid())
    return nullptr;  // PagedMemory::Allocate() failed. We are out of memory.
  return buf;
}

TraceBuffer::TraceBuffer(CloneCtor, const TraceBuffer& src)
    : overwrite_policy_(src.overwrite_policy_),
      read_only_(true),
      discard_writes_(src.discard_writes_) {
  if (!Initialize(src.data_.size()))
    return;  // TraceBuffer::CloneReadOnly() will check |data_| and return
             // nullptr.

  // The assignments below must be done after Initialize().

  EnsureCommitted(src.used_size_);
  memcpy(data_.Get(), src.data_.Get(), src.used_size_);
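  // Only |used_size_| bytes are committed and copied, so cloning a buffer
  // whose high-water mark is low neither touches nor commits the unused tail.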
  last_chunk_id_written_ = src.last_chunk_id_written_;

  stats_ = src.stats_;
  stats_.set_bytes_read(0);
  stats_.set_chunks_read(0);
  stats_.set_readaheads_failed(0);
  stats_.set_readaheads_succeeded(0);

  // Copy the index of chunk metadata and reset the read states.
  index_ = ChunkMap(src.index_);
  for (auto& kv : index_) {
    ChunkMeta& chunk_meta = kv.second;
    chunk_meta.num_fragments_read = 0;
    chunk_meta.cur_fragment_offset = 0;
    chunk_meta.set_last_read_packet_skipped(false);
  }
  read_iter_ = SequenceIterator();
}

}  // namespace perfetto