/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACING_CORE_TRACE_BUFFER_H_
#define SRC_TRACING_CORE_TRACE_BUFFER_H_

#include <stdint.h>
#include <string.h>

#include <array>
#include <limits>
#include <map>
#include <memory>
#include <tuple>

#include "perfetto/base/logging.h"
#include "perfetto/base/page_allocator.h"
#include "perfetto/tracing/core/basic_types.h"
#include "perfetto/tracing/core/slice.h"
namespace perfetto {

class TracePacket;

// The main buffer, owned by the tracing service, where all trace data is
// ultimately stored. The service will own several instances of this class,
// at least one per active consumer (as defined in the |buffers| section of
// trace_config.proto) and will copy chunks from the producer's shared memory
// buffers into here when a CommitData IPC is received.
//
// Writing into the buffer
// -----------------------
// Data is copied from the SMB(s) using CopyChunkUntrusted(). The buffer will
// hence contain data coming from different producers and different writer
// sequences, more specifically:
// - The service receives data from several producers, identified by their ID.
// - Each producer writes several sequences, each identified by a WriterID
//   (they correspond to TraceWriter instances in the producer).
// - Each writer writes, in order, several chunks.
// - Each chunk contains zero, one, or more TracePacket(s), or even just
//   fragments of packets (when they span across several chunks).
//
// So at any point in time, the buffer will contain a variable number of logical
// sequences identified by the {ProducerID, WriterID} tuple. Any given chunk
// will only contain packets (or fragments) belonging to the same sequence.
//
// The buffer operates by default as a ring buffer. Chunks are (over-)written
// in the same order as the CopyChunkUntrusted() calls. When overwriting old
// content, entire chunks are overwritten or clobbered. The buffer never leaves
// a partial chunk around. Chunks' payload is copied as-is, but their header is
// not and is repacked in order to keep the ProducerID around.
//
// Chunks are stored in the buffer next to each other. Each chunk is prefixed by
// an inline header (ChunkRecord), which contains most of the fields of the
// SharedMemoryABI ChunkHeader + the ProducerID + the size of the payload.
// It's essentially a conventional binary object stream, where each ChunkRecord
// tells where it ends and hence where to find the next one, like this:
//
//          .-------------------------. 16 byte boundary
//          | ChunkRecord:   16 bytes |
//          | - chunk id:     4 bytes |
//          | - producer id:  2 bytes |
//          | - writer id:    2 bytes |
//          | - #fragments:   2 bytes |
//    +-----+ - record size:  2 bytes |
//    |     | - flags+pad:    4 bytes |
//    |     +-------------------------+
//    |     |                         |
//    |     :     Chunk payload       :
//    |     |                         |
//    |     +-------------------------+
//    |     |    Optional padding     |
//    +---> +-------------------------+ 16 byte boundary
//          |      ChunkRecord        |
//          :                         :
// Chunks stored in the buffer are always rounded up to 16 bytes (that is
// sizeof(ChunkRecord)), in order to avoid further inner fragmentation.
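// For example (illustrative numbers): a chunk with a 26 byte payload occupies
// 16 (ChunkRecord) + 26 = 42 bytes, which is rounded up to 48; the remaining
// 6 bytes are zero-filled padding after the payload.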
// Special "padding" chunks can be put in the buffer, e.g. when we try to write
// a chunk of size N but the write pointer is < N bytes from the end of the
// buffer (and hence needs to wrap over).
// Because of this, the buffer is self-describing: the contents of the buffer
// can be reconstructed by just looking at the buffer content (this will be
// quite useful in the future to recover the buffer from crash reports).
//
// However, in order to keep some operations (patching and reading) fast, a
// lookaside index is maintained (in |index_|), keeping each chunk in the buffer
// indexed by its {ProducerID, WriterID, ChunkID} tuple.
//
// Patching data out-of-band
// -------------------------
// This buffer also supports patching chunks' payload out-of-band, after they
// have been stored. This is to allow producers to backfill the "size" fields
// of the protos that span across several chunks, when the previous chunks are
// returned to the service. TryPatchChunkContents() deals with the fact that a
// chunk might have been lost (because of wrapping) by the time the out-of-band
// IPC arrives.
//
// Reading from the buffer
// -----------------------
// This class supports one reader only (the consumer). Reads are NOT idempotent
// as they move the read cursors around. Reading back the buffer is the most
// conceptually complex part. The ReadNextTracePacket() method operates with
// whole packet granularity. Packets are returned only when all their fragments
// are available.
// This class takes care of:
// - Gluing packets within the same sequence, even if they are not stored
//   adjacently in the buffer.
// - Re-ordering chunks within a sequence (using the ChunkID, which wraps).
// - Detecting holes in packet fragments (because of loss of chunks).
// Reads guarantee that packets for the same sequence are read in FIFO order
// (according to their ChunkID), but don't give any guarantee about the read
// order of packets from different sequences; see comments in
// ReadNextTracePacket() below.
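//
// Illustrative usage sketch (buffer size and variable names below are made up
// for the example, they are not part of this API):
//
//   std::unique_ptr<TraceBuffer> buf = TraceBuffer::Create(64 * 1024);
//   // On every CommitData IPC, copy the committed chunk:
//   buf->CopyChunkUntrusted(producer_id, producer_uid, writer_id, chunk_id,
//                           num_fragments, chunk_flags, src, src_size);
//   // When the consumer reads back:
//   buf->BeginRead();
//   TracePacket packet;
//   uid_t producer_uid_out;
//   while (buf->ReadNextTracePacket(&packet, &producer_uid_out)) {
//     // |packet| now holds the slices of one whole packet.
//   }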
class TraceBuffer {
 public:
  static const size_t InlineChunkHeaderSize;  // For test/fake_packet.{cc,h}.

  // Keep these fields consistent with trace_stats.proto. See comments in
  // the .proto for the semantics of these fields.
  struct Stats {
    uint64_t bytes_written = 0;
    uint64_t chunks_written = 0;
    uint64_t chunks_overwritten = 0;
    uint64_t write_wrap_count = 0;
    uint64_t patches_succeeded = 0;
    uint64_t patches_failed = 0;
    uint64_t readaheads_succeeded = 0;
    uint64_t readaheads_failed = 0;
    uint64_t abi_violations = 0;
    // TODO(primiano): add bytes_lost_for_padding.
  };

  // Argument for out-of-band patches applied through TryPatchChunkContents().
  struct Patch {
    // From SharedMemoryABI::kPacketHeaderSize.
    static constexpr size_t kSize = 4;

    size_t offset_untrusted;
    std::array<uint8_t, kSize> data;
  };
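
  // Illustrative example (made-up values): to backfill the 4-byte size prefix
  // at the start of a previously committed chunk's payload, a producer would
  // send a patch such as:
  //   Patch{/*offset_untrusted=*/0, /*data=*/{{0x2a, 0x00, 0x00, 0x00}}};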

  // Can return nullptr if the memory allocation fails.
  static std::unique_ptr<TraceBuffer> Create(size_t size_in_bytes);

  ~TraceBuffer();

  // Copies a Chunk from a producer Shared Memory Buffer into the trace buffer.
  // |src| points to the first packet in the SharedMemoryABI's chunk shared
  // with an untrusted producer. "untrusted" here means: the producer might be
  // malicious and might change |src| concurrently while we read it (internally
  // this method first memcpy()-s the chunk before processing it).
  // None of the arguments should be trusted, unless otherwise stated. We can
  // trust that |src| points to a valid memory area, but not its contents.
  void CopyChunkUntrusted(ProducerID producer_id_trusted,
                          uid_t producer_uid_trusted,
                          WriterID writer_id,
                          ChunkID chunk_id,
                          uint16_t num_fragments,
                          uint8_t chunk_flags,
                          const uint8_t* src,
                          size_t size);
  // Applies a batch of |patches| to the given chunk, if the given chunk is
  // still in the buffer. Does nothing if the given ChunkID is gone.
  // Returns true if the chunk has been found and patched, false otherwise.
  // |other_patches_pending| is used to determine whether this is the only
  // batch of patches for the chunk or more are expected.
  // If |other_patches_pending| == false, the chunk is marked as ready to be
  // consumed. If true, the state of the chunk is not altered.
  bool TryPatchChunkContents(ProducerID,
                             WriterID,
                             ChunkID,
                             const Patch* patches,
                             size_t patches_size,
                             bool other_patches_pending);

  // To read the contents of the buffer the caller needs to:
  //   BeginRead()
  //   while (ReadNextTracePacket(packet_fragments)) { ... }
  // No other calls to any other method should be interleaved between
  // BeginRead() and ReadNextTracePacket().
  // Reads in the TraceBuffer are NOT idempotent.
  void BeginRead();

  // Returns the next packet in the buffer, if any, and the uid of the producer
  // that wrote it (as passed in the CopyChunkUntrusted() call). Returns false
  // if no packets can be read at this point.
  // This function returns only complete packets. Specifically:
  // When there is at least one complete packet in the buffer, this function
  // returns true and populates the TracePacket argument with the boundaries of
  // each fragment for one packet.
  // TracePacket will have at least one slice when this function returns true.
  // When there are no whole packets eligible to read (e.g. we are still missing
  // fragments), this function returns false.
  // This function also guarantees that packets for a given
  // {ProducerID, WriterID} are read in FIFO order.
  // This function does not guarantee any ordering w.r.t. packets belonging to
  // different WriterID(s). For instance, given the following packets copied
  // into the buffer:
  //   {ProducerID: 1, WriterID: 1}: P1 P2 P3
  //   {ProducerID: 1, WriterID: 2}: P4 P5 P6
  //   {ProducerID: 2, WriterID: 1}: P7 P8 P9
  // The following read sequence is possible:
  //   P1, P4, P7, P2, P3, P5, P8, P9, P6
  // But the following is guaranteed to NOT happen:
  //   P1, P5, P7, P4 (P4 cannot come after P5)
  bool ReadNextTracePacket(TracePacket*, uid_t* producer_uid);

  const Stats& stats() const { return stats_; }
  size_t size() const { return size_; }

 private:
  friend class TraceBufferTest;

  // ChunkRecord is a Chunk header stored inline in the |data_| buffer, before
  // the chunk payload (the packets' data). The |data_| buffer looks like this:
  // +---------------+------------------++---------------+-----------------+
  // | ChunkRecord 1 | Chunk payload 1  || ChunkRecord 2 | Chunk payload 2 | ...
  // +---------------+------------------++---------------+-----------------+
  // Most of the ChunkRecord fields are copied from SharedMemoryABI::ChunkHeader
  // (the chunk header used in the shared memory buffers).
  // A ChunkRecord can be a special "padding" record. In this case its payload
  // should be ignored and the record should be just skipped.
  //
  // Full page move optimization:
  // This struct has to be exactly (sizeof(PageHeader) + sizeof(ChunkHeader))
  // (from shared_memory_abi.h) to allow full page move optimizations
  // (TODO(primiano): not implemented yet). In the special case of moving a full
  // 4k page that contains only one chunk, in fact, we can just ask the kernel
  // to move the full SHM page (see SPLICE_F_{GIFT,MOVE}) and overlay the
  // ChunkRecord on top of the moved SMB's header (page + chunk header).
  // This special requirement is covered by static_assert(s) in the .cc file.
  struct ChunkRecord {
    explicit ChunkRecord(size_t sz) : flags{0}, is_padding{0} {
      PERFETTO_DCHECK(sz >= sizeof(ChunkRecord) &&
                      sz % sizeof(ChunkRecord) == 0 && sz <= kMaxSize);
      size = static_cast<decltype(size)>(sz);
    }

    bool is_valid() const { return size != 0; }

    // Keep this structure packed and exactly 16 bytes (128 bits) big.
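    // (Breakdown: 4 (chunk_id) + 2 (producer_id) + 2 (writer_id) +
    // 2 (num_fragments) + 2 (size) + 1 (flags + is_padding + unused_flag) +
    // 3 (unused) == 16 bytes.)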

    // [32 bits] Monotonic counter within the same writer_id.
    ChunkID chunk_id = 0;

    // [16 bits] ID of the Producer from which the Chunk was copied.
    ProducerID producer_id = 0;

    // [16 bits] Unique per Producer (but not within the service).
    // If writer_id == kWriterIdPadding the record should just be skipped.
    WriterID writer_id = 0;

    // Number of fragments contained in the chunk.
    uint16_t num_fragments = 0;

    // Size in bytes, including sizeof(ChunkRecord) itself.
    uint16_t size;

    uint8_t flags : 6;  // See SharedMemoryABI::ChunkHeader::flags.
    uint8_t is_padding : 1;
    uint8_t unused_flag : 1;

    // Not strictly needed, can be reused for more fields in the future. But
    // right now helps to spot chunks in hex dumps.
    char unused[3] = {'C', 'H', 'U'};

    static constexpr size_t kMaxSize =
        std::numeric_limits<decltype(size)>::max();
  };

  // Lookaside index entry. This serves three purposes:
  // 1) Allow a fast lookup of a ChunkRecord by its ID (the tuple
  //    {ProducerID, WriterID, ChunkID}). This is used when applying out-of-band
  //    patches to the contents of the chunks after they have been copied into
  //    the TraceBuffer.
  // 2) Keep the chunks ordered by their ID. This is used when reading back.
  // 3) Keep metadata about the status of the chunk, e.g. whether the contents
  //    have been read already and should be skipped in a future read pass.
  // This struct should not have any field that is essential for reconstructing
  // the contents of the buffer from a crash dump.
  struct ChunkMeta {
    // Key used for sorting in the map.
    struct Key {
      Key(ProducerID p, WriterID w, ChunkID c)
          : producer_id{p}, writer_id{w}, chunk_id{c} {}

      explicit Key(const ChunkRecord& cr)
          : Key(cr.producer_id, cr.writer_id, cr.chunk_id) {}

      // Note that this sorting doesn't take into account the fact that ChunkID
      // will wrap over at some point. The extra logic in SequenceIterator deals
      // with that.
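      // For example (illustrative): Key{1, 1, 5} < Key{1, 2, 0} < Key{2, 1, 0},
      // even though, after a ChunkID wrap, chunk 0 may logically come after
      // chunk kMaxChunkID within the same sequence.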
      bool operator<(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) <
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      bool operator==(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) ==
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      // These fields should match at all times the corresponding fields in
      // the |chunk_record|. They are copied here purely for efficiency to avoid
      // dereferencing the buffer all the time.
      ProducerID producer_id;
      WriterID writer_id;
      ChunkID chunk_id;
    };

    ChunkMeta(ChunkRecord* c, uint16_t p, uint8_t f, uid_t u)
        : chunk_record{c}, trusted_uid{u}, flags{f}, num_fragments{p} {}

    ChunkRecord* const chunk_record;   // Addr of ChunkRecord within |data_|.
    const uid_t trusted_uid;           // uid of the producer.
    uint8_t flags = 0;                 // See SharedMemoryABI::flags.
    const uint16_t num_fragments = 0;  // Total number of packet fragments.
    uint16_t num_fragments_read = 0;   // Number of fragments already read.

    // The start offset of the next fragment (the |num_fragments_read|-th) to be
    // read. This is the offset in bytes from the beginning of the ChunkRecord's
    // payload (the 1st fragment starts at |chunk_record| +
    // sizeof(ChunkRecord)).
    uint16_t cur_fragment_offset = 0;
  };

  using ChunkMap = std::map<ChunkMeta::Key, ChunkMeta>;

  // Allows iterating over a sub-sequence of |index_| for all keys belonging to
  // the same {ProducerID, WriterID}. Furthermore it takes into account the
  // wrapping of ChunkID. Instances are valid only as long as the |index_| is
  // not altered (they can be used safely only between adjacent
  // ReadNextTracePacket() calls).
  // The iteration proceeds in the following order:
  // |wrapping_id| + 1 -> |seq_end|, |seq_begin| -> |wrapping_id|.
  // Practical example:
  // - Assume that kMaxChunkID == 7.
  // - Assume that we have all 8 chunks in the range (0..7).
  // - Hence, |seq_begin| == c0, |seq_end| == c7.
  // - Assume |wrapping_id| = 4 (c4 is the last chunk copied over
  //   through a CopyChunkUntrusted()).
  // The resulting iteration order will be: c5, c6, c7, c0, c1, c2, c3, c4.
  struct SequenceIterator {
    // Points to the 1st key (the one with the numerically min ChunkID).
    ChunkMap::iterator seq_begin;

    // Points one past the last key (the one with the numerically max ChunkID).
    ChunkMap::iterator seq_end;

    // Current iterator, always >= seq_begin && <= seq_end.
    ChunkMap::iterator cur;

    // The latest ChunkID written. Determines the start/end of the sequence.
    ChunkID wrapping_id;

    bool is_valid() const { return cur != seq_end; }

    ProducerID producer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.producer_id;
    }

    WriterID writer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.writer_id;
    }

    ChunkID chunk_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.chunk_id;
    }

    ChunkMeta& operator*() {
      PERFETTO_DCHECK(is_valid());
      return cur->second;
    }

    // Moves |cur| to the next chunk in the index.
    // is_valid() will become false after calling this, if this was the last
    // entry of the sequence.
    void MoveNext();

    void MoveToEnd() { cur = seq_end; }
  };

  enum class ReadAheadResult {
    kSucceededReturnSlices,
    kFailedMoveToNextSequence,
    kFailedStayOnSameSequence,
  };

  TraceBuffer();
  TraceBuffer(const TraceBuffer&) = delete;
  TraceBuffer& operator=(const TraceBuffer&) = delete;

  bool Initialize(size_t size);

  // Returns an object that allows iterating over chunks in the |index_| that
  // have the same {ProducerID, WriterID} as
  // |seq_begin.first.{producer,writer}_id|. |seq_begin| must be an iterator to
  // the first entry in the |index_| that has a different {ProducerID, WriterID}
  // from the previous one. It is valid for |seq_begin| to be == index_.end()
  // (i.e. if the index is empty). The iteration takes care of ChunkID wrapping,
  // by using |last_chunk_id_|.
  SequenceIterator GetReadIterForSequence(ChunkMap::iterator seq_begin);

  // Used as a last resort when a buffer corruption is detected.
  void ClearContentsAndResetRWCursors();

  // Adds a padding record of the given size (must be a multiple of
  // sizeof(ChunkRecord)).
  void AddPaddingRecord(size_t);

  // Looks for contiguous fragments of the same packet starting from
  // |read_iter_|. If a contiguous packet is found, all the fragments are
  // pushed into TracePacket and the function returns kSucceededReturnSlices.
  // If not, the function returns either kFailedMoveToNextSequence or
  // kFailedStayOnSameSequence, telling the caller to continue looking for
  // packets.
  ReadAheadResult ReadAhead(TracePacket*);

  // Deletes (by marking the record invalid and removing from the index) all
  // chunks from |wptr_| to |wptr_| + |bytes_to_clear|. Returns the size of the
  // gap left between the next valid Chunk and the end of the deletion range, or
  // 0 if such next valid chunk doesn't exist (if the buffer is still zeroed).
  // Graphically, assume the initial situation is the following (|wptr_| = 10).
  // |0        |10 (wptr_)       |30       |40                 |60
  // +---------+-----------------+---------+-------------------+---------+
  // | Chunk 1 | Chunk 2         | Chunk 3 | Chunk 4           | Chunk 5 |
  // +---------+-----------------+---------+-------------------+---------+
  //           |_________Deletion range_______|~~return value~~|
  //
  // A call to DeleteNextChunksFor(32) will remove chunks 2,3,4 and return 18
  // (60 - 42), the distance between chunk 5 and the end of the deletion range.
  size_t DeleteNextChunksFor(size_t bytes_to_clear);

  // Decodes the boundaries of the next packet (or a fragment) pointed to by
  // ChunkMeta and pushes that into |TracePacket|. It also increments the
  // |num_fragments_read| counter.
  // TracePacket can be nullptr, in which case the read state is still advanced.
  bool ReadNextPacketInChunk(ChunkMeta*, TracePacket*);

  void DcheckIsAlignedAndWithinBounds(const uint8_t* ptr) const {
    PERFETTO_DCHECK(ptr >= begin() && ptr <= end() - sizeof(ChunkRecord));
    PERFETTO_DCHECK(
        (reinterpret_cast<uintptr_t>(ptr) & (alignof(ChunkRecord) - 1)) == 0);
  }

  ChunkRecord* GetChunkRecordAt(uint8_t* ptr) const {
    DcheckIsAlignedAndWithinBounds(ptr);
    return reinterpret_cast<ChunkRecord*>(ptr);
  }

  // |src| can be nullptr (in which case |size| must be ==
  // record.size - sizeof(ChunkRecord)), for the case of writing a padding
  // record. |wptr_| is NOT advanced by this function, the caller must do that.
  void WriteChunkRecord(const ChunkRecord& record,
                        const uint8_t* src,
                        size_t size) {
    // Note: |record.size| will be slightly bigger than |size| because of the
    // ChunkRecord header and rounding, to ensure that all ChunkRecord(s) are a
    // multiple of sizeof(ChunkRecord). The invariant is:
    // record.size >= |size| + sizeof(ChunkRecord) (== if no rounding).
    PERFETTO_DCHECK(size <= ChunkRecord::kMaxSize);
    PERFETTO_DCHECK(record.size >= sizeof(record));
    PERFETTO_DCHECK(record.size % sizeof(record) == 0);
    PERFETTO_DCHECK(record.size >= size + sizeof(record));
    PERFETTO_CHECK(record.size <= size_to_end());
    DcheckIsAlignedAndWithinBounds(wptr_);

    // Deliberately not a *D*CHECK.
    PERFETTO_CHECK(wptr_ + sizeof(record) + size <= end());
    memcpy(wptr_, &record, sizeof(record));
    if (PERFETTO_LIKELY(src)) {
      memcpy(wptr_ + sizeof(record), src, size);
    } else {
      PERFETTO_DCHECK(size == record.size - sizeof(record));
    }
    const size_t rounding_size = record.size - sizeof(record) - size;
    memset(wptr_ + sizeof(record) + size, 0, rounding_size);
  }

  uint8_t* begin() const { return reinterpret_cast<uint8_t*>(data_.get()); }
  uint8_t* end() const { return begin() + size_; }
  size_t size_to_end() const { return static_cast<size_t>(end() - wptr_); }

  base::PageAllocator::UniquePtr data_;
  size_t size_ = 0;            // Size in bytes of |data_|.
  size_t max_chunk_size_ = 0;  // Max size in bytes allowed for a chunk.
  uint8_t* wptr_ = nullptr;    // Write pointer.

  // An index that keeps track of the positions and metadata of each
  // ChunkRecord.
  ChunkMap index_;

  // Read iterator used for ReadNextTracePacket(). It is reset by calling
  // BeginRead(). It becomes invalid after any call to methods that alter the
  // |index_|.
  SequenceIterator read_iter_;

  // Keeps track of the last ChunkID written for a given writer.
  // TODO(primiano): should clean up keys from this map. Right now this map
  // grows without bounds (although realistically it is not a problem unless we
  // have too many producers/writers within the same trace session).
  std::map<std::pair<ProducerID, WriterID>, ChunkID> last_chunk_id_;

  // Statistics about buffer usage.
  Stats stats_;

#if PERFETTO_DCHECK_IS_ON()
  bool changed_since_last_read_ = false;
#endif

  // When true, disables some DCHECKs that have been put in place to detect
  // bugs in the producers. This is for tests that feed malicious inputs and
  // hence mimic a buggy producer.
  bool suppress_sanity_dchecks_for_testing_ = false;
};

}  // namespace perfetto

#endif  // SRC_TRACING_CORE_TRACE_BUFFER_H_