/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef SRC_TRACING_CORE_TRACE_BUFFER_H_
#define SRC_TRACING_CORE_TRACE_BUFFER_H_

#include <stdint.h>
#include <string.h>

#include <array>
#include <limits>
#include <map>
#include <tuple>

#include "perfetto/base/logging.h"
#include "perfetto/ext/base/paged_memory.h"
#include "perfetto/ext/base/thread_annotations.h"
#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/basic_types.h"
#include "perfetto/ext/tracing/core/slice.h"
#include "perfetto/ext/tracing/core/trace_stats.h"

namespace perfetto {

class TracePacket;

// The main buffer, owned by the tracing service, where all the trace data is
// ultimately stored. The service will own several instances of this class,
// at least one per active consumer (as defined in the |buffers| section of
// trace_config.proto), and will copy chunks from the producers' shared memory
// buffers into here when a CommitData IPC is received.
//
// Writing into the buffer
// -----------------------
// Data is copied from the SMB(s) using CopyChunkUntrusted(). The buffer will
// hence contain data coming from different producers and different writer
// sequences, more specifically:
// - The service receives data from several producers, identified by their ID.
// - Each producer writes several sequences, each identified by a WriterID
//   (they correspond to TraceWriter instances in the producer).
// - Each writer writes, in order, several chunks.
// - Each chunk contains zero, one, or more TracePacket(s), or even just
//   fragments of packets (when they span across several chunks).
//
// So at any point in time, the buffer will contain a variable number of
// logical sequences identified by the {ProducerID, WriterID} tuple. Any given
// chunk will only contain packets (or fragments) belonging to the same
// sequence.
//
// The buffer operates by default as a ring buffer.
// It has two overwrite policies:
// 1. kOverwrite (default): if the write pointer reaches the read pointer, old
//    unread chunks will be overwritten by new chunks.
// 2. kDiscard: if the write pointer reaches the read pointer, unread chunks
//    are preserved and the new chunks are discarded. Any future write becomes
//    a no-op, even if the reader manages to fully catch up. This is because
//    once a chunk is discarded, the sequence of packets is broken and trying
//    to recover would be too hard (also due to the fact that, at the same
//    time, we allow out-of-order commits and chunk re-writes).
//
// Chunks are (over)written in the same order as the CopyChunkUntrusted()
// calls. When overwriting old content, entire chunks are overwritten or
// clobbered. The buffer never leaves a partial chunk around.
// Chunks' payload is copied as-is, but their header is not and is repacked in
// order to keep the ProducerID around.
//
// Chunks are stored in the buffer next to each other. Each chunk is prefixed
// by an inline header (ChunkRecord), which contains most of the fields of the
// SharedMemoryABI ChunkHeader + the ProducerID + the size of the payload.
// It's essentially a conventional binary object stream, where each ChunkRecord
// tells where it ends and hence where to find the next one, like this:
//
//          .-------------------------.  16 byte boundary
//          | ChunkRecord:   16 bytes |
//          | - chunk id:     4 bytes |
//          | - producer id:  2 bytes |
//          | - writer id:    2 bytes |
//          | - #fragments:   2 bytes |
//    +-----+ - record size:  2 bytes |
//    |     | - flags+pad:    4 bytes |
//    |     +-------------------------+
//    |     |                         |
//    |     :      Chunk payload      :
//    |     |                         |
//    |     +-------------------------+
//    |     |     Optional padding    |
//    +---> +-------------------------+  16 byte boundary
//          |       ChunkRecord       |
//          :                         :
// Chunks stored in the buffer are always rounded up to 16 bytes (that is
// sizeof(ChunkRecord)), in order to avoid further inner fragmentation.
// Special "padding" chunks can be put in the buffer, e.g. in the case when we
// try to write a chunk of size N while the write pointer is at the end of the
// buffer, but the write pointer is < N bytes from the end (and hence needs to
// wrap over).
// Because of this, the buffer is self-describing: the contents of the buffer
// can be reconstructed by just looking at the buffer content (this will be
// quite useful in future to recover the buffer from crash reports).
//
// However, in order to keep some operations (patching and reading) fast, a
// lookaside index is maintained (in |index_|), keeping each chunk in the
// buffer indexed by its {ProducerID, WriterID, ChunkID} tuple.
//
// Patching data out-of-band
// -------------------------
// This buffer also supports patching chunks' payload out-of-band, after they
// have been stored. This is to allow producers to backfill the "size" fields
// of the protos that span across several chunks, when the previous chunks are
// returned to the service. The TryPatchChunkContents() method deals with the
// fact that a chunk might have been lost (because of wrapping) by the time
// the OOB IPC comes.
//
// Reading from the buffer
// -----------------------
// This class supports one reader only (the consumer). Reads are NOT idempotent
// as they move the read cursors around. Reading back the buffer is the most
// conceptually complex part. The ReadNextTracePacket() method operates with
// whole packet granularity. Packets are returned only when all their fragments
// are available.
// This class takes care of:
// - Gluing packets within the same sequence, even if they are not stored
//   adjacently in the buffer.
// - Re-ordering chunks within a sequence (using the ChunkID, which wraps).
// - Detecting holes in packet fragments (because of loss of chunks).
// Reads guarantee that packets for the same sequence are read in FIFO order
// (according to their ChunkID), but don't give any guarantee about the read
// order of packets from different sequences, see comments in
// ReadNextTracePacket() below.
class TraceBuffer {
 public:
  static const size_t InlineChunkHeaderSize;  // For test/fake_packet.{cc,h}.
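
  // A minimal sketch (not part of the API) of how a service-side caller might
  // drive the write path declared below. The |producer_id|, |producer_uid|,
  // |writer_id|, |chunk_id|, |num_fragments|, |chunk_flags|, |chunk_data| and
  // |chunk_size| variables are hypothetical placeholders for values taken
  // from the CommitData IPC:
  //
  //   std::unique_ptr<TraceBuffer> buf =
  //       TraceBuffer::Create(4 * 1024 * 1024, TraceBuffer::kOverwrite);
  //   if (!buf)
  //     return;  // Allocation can fail.
  //   buf->CopyChunkUntrusted(producer_id, producer_uid, writer_id, chunk_id,
  //                           num_fragments, chunk_flags,
  //                           /*chunk_complete=*/true, chunk_data, chunk_size);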

  // See comment in the header above.
  enum OverwritePolicy { kOverwrite, kDiscard };

  // Argument for out-of-band patches applied through TryPatchChunkContents().
  struct Patch {
    // From SharedMemoryABI::kPacketHeaderSize.
    static constexpr size_t kSize = 4;

    size_t offset_untrusted;
    std::array<uint8_t, kSize> data;
  };

  // Identifiers that are constant for a packet sequence.
  struct PacketSequenceProperties {
    ProducerID producer_id_trusted;
    uid_t producer_uid_trusted;
    WriterID writer_id;
  };

  // Can return nullptr if the memory allocation fails.
  static std::unique_ptr<TraceBuffer> Create(size_t size_in_bytes,
                                             OverwritePolicy = kOverwrite);

  ~TraceBuffer();

  // Copies a Chunk from a producer Shared Memory Buffer into the trace buffer.
  // |src| points to the first packet in the SharedMemoryABI's chunk shared
  // with an untrusted producer. "untrusted" here means: the producer might be
  // malicious and might change |src| concurrently while we read it (internally
  // this method first memcpy()-s the chunk before processing it). None of the
  // arguments should be trusted, unless otherwise stated. We can trust that
  // |src| points to a valid memory area, but not its contents.
  //
  // This method may be called multiple times for the same chunk. In this case,
  // the original chunk's payload will be overwritten and its number of
  // fragments and flags adjusted to match |num_fragments| and |chunk_flags|.
  // The service may use this to insert partial chunks
  // (|chunk_complete = false|) before the producer has committed them.
  //
  // If |chunk_complete| is |false|, the TraceBuffer will only consider the
  // first |num_fragments - 1| packets to be complete, since the producer may
  // not have finished writing the latest packet. Reading from a sequence will
  // also not progress past any incomplete chunks until they are rewritten with
  // |chunk_complete = true|, e.g. after a producer's commit.
  //
  // TODO(eseckler): Pass in a PacketStreamProperties instead of individual
  // IDs.
  void CopyChunkUntrusted(ProducerID producer_id_trusted,
                          uid_t producer_uid_trusted,
                          WriterID writer_id,
                          ChunkID chunk_id,
                          uint16_t num_fragments,
                          uint8_t chunk_flags,
                          bool chunk_complete,
                          const uint8_t* src,
                          size_t size);

  // Applies a batch of |patches| to the given chunk, if the given chunk is
  // still in the buffer. Does nothing if the given ChunkID is gone.
  // Returns true if the chunk has been found and patched, false otherwise.
  // |other_patches_pending| is used to determine whether this is the only
  // batch of patches for the chunk or more will follow.
  // If |other_patches_pending| == false, the chunk is marked as ready to be
  // consumed. If true, the state of the chunk is not altered.
  bool TryPatchChunkContents(ProducerID,
                             WriterID,
                             ChunkID,
                             const Patch* patches,
                             size_t patches_size,
                             bool other_patches_pending);

  // To read the contents of the buffer the caller needs to:
  //   BeginRead();
  //   while (ReadNextTracePacket(packet_fragments)) { ... }
  // No other calls to any other method should be interleaved between
  // BeginRead() and ReadNextTracePacket().
  // Reads in the TraceBuffer are NOT idempotent.
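  //
  // A non-normative sketch of that loop, assuming |buf| is a
  // std::unique_ptr<TraceBuffer> and the full TracePacket definition is
  // available to the caller:
  //
  //   buf->BeginRead();
  //   for (;;) {
  //     TracePacket packet;
  //     TraceBuffer::PacketSequenceProperties seq_props{};
  //     bool prev_packet_dropped = false;
  //     if (!buf->ReadNextTracePacket(&packet, &seq_props,
  //                                   &prev_packet_dropped)) {
  //       break;
  //     }
  //     // Hand |packet| to the consumer, tagged with |seq_props|.
  //   }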
  void BeginRead();

  // Returns the next packet in the buffer, if any, and the producer_id,
  // producer_uid, and writer_id of the producer/writer that wrote it (as
  // passed in the CopyChunkUntrusted() call). Returns false if no packets can
  // be read at this point. If a packet was read successfully,
  // |previous_packet_on_sequence_dropped| is set to |true| if the previous
  // packet on the sequence was dropped from the buffer before it could be read
  // (e.g. because its chunk was overwritten due to the ring buffer wrapping or
  // due to an ABI violation), and to |false| otherwise.
  //
  // This function returns only complete packets. Specifically:
  // When there is at least one complete packet in the buffer, this function
  // returns true and populates the TracePacket argument with the boundaries of
  // each fragment for one packet.
  // TracePacket will have at least one slice when this function returns true.
  // When there are no whole packets eligible to read (e.g. we are still
  // missing fragments) this function returns false.
  // This function guarantees also that packets for a given
  // {ProducerID, WriterID} are read in FIFO order.
  // This function does not guarantee any ordering w.r.t. packets belonging to
  // different WriterID(s). For instance, given the following packets copied
  // into the buffer:
  //   {ProducerID: 1, WriterID: 1}: P1 P2 P3
  //   {ProducerID: 1, WriterID: 2}: P4 P5 P6
  //   {ProducerID: 2, WriterID: 1}: P7 P8 P9
  // The following read sequence is possible:
  //   P1, P4, P7, P2, P3, P5, P8, P9, P6
  // But the following is guaranteed to NOT happen:
  //   P1, P5, P7, P4 (P4 cannot come after P5)
  bool ReadNextTracePacket(TracePacket*,
                           PacketSequenceProperties* sequence_properties,
                           bool* previous_packet_on_sequence_dropped);

  const TraceStats::BufferStats& stats() const { return stats_; }
  size_t size() const { return size_; }

 private:
  friend class TraceBufferTest;

  // ChunkRecord is a Chunk header stored inline in the |data_| buffer, before
  // the chunk payload (the packets' data). The |data_| buffer looks like this:
  // +---------------+------------------++---------------+-----------------+
  // | ChunkRecord 1 | Chunk payload 1  || ChunkRecord 2 | Chunk payload 2 | ...
  // +---------------+------------------++---------------+-----------------+
  // Most of the ChunkRecord fields are copied from
  // SharedMemoryABI::ChunkHeader (the chunk header used in the shared memory
  // buffers).
  // A ChunkRecord can be a special "padding" record. In this case its payload
  // should be ignored and the record should simply be skipped.
  //
  // Full page move optimization:
  // This struct has to be exactly (sizeof(PageHeader) + sizeof(ChunkHeader))
  // (from shared_memory_abi.h) to allow full page move optimizations
  // (TODO(primiano): not implemented yet). In the special case of moving a
  // full 4k page that contains only one chunk, in fact, we can just ask the
  // kernel to move the full SHM page (see SPLICE_F_{GIFT,MOVE}) and overlay
  // the ChunkRecord on top of the moved SMB's header (page + chunk header).
  // This special requirement is covered by static_assert(s) in the .cc file.
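  //
  // A sketch of roughly what such a check could look like in the .cc file
  // (the exact assertion and the spelling of the ABI types there may differ):
  //
  //   static_assert(sizeof(ChunkRecord) ==
  //                     sizeof(SharedMemoryABI::PageHeader) +
  //                         sizeof(SharedMemoryABI::ChunkHeader),
  //                 "ChunkRecord out of sync with the SharedMemoryABI layout");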
  struct ChunkRecord {
    explicit ChunkRecord(size_t sz) : flags{0}, is_padding{0} {
      PERFETTO_DCHECK(sz >= sizeof(ChunkRecord) &&
                      sz % sizeof(ChunkRecord) == 0 && sz <= kMaxSize);
      size = static_cast<decltype(size)>(sz);
    }

    bool is_valid() const { return size != 0; }

    // Keep this structure packed and exactly 16 bytes (128 bits) big.

    // [32 bits] Monotonic counter within the same writer_id.
    ChunkID chunk_id = 0;

    // [16 bits] ID of the Producer from which the Chunk was copied.
    ProducerID producer_id = 0;

    // [16 bits] Unique per Producer (but not within the service).
    // If writer_id == kWriterIdPadding the record should just be skipped.
    WriterID writer_id = 0;

    // Number of fragments contained in the chunk.
    uint16_t num_fragments = 0;

    // Size in bytes, including sizeof(ChunkRecord) itself.
    uint16_t size;

    uint8_t flags : 6;  // See SharedMemoryABI::ChunkHeader::flags.
    uint8_t is_padding : 1;
    uint8_t unused_flag : 1;

    // Not strictly needed, can be reused for more fields in the future. But
    // right now helps to spot chunks in hex dumps.
    char unused[3] = {'C', 'H', 'U'};

    static constexpr size_t kMaxSize =
        std::numeric_limits<decltype(size)>::max();
  };

  // Lookaside index entry. This serves three purposes:
  // 1) Allow a fast lookup of ChunkRecord by their ID (the tuple
  //    {ProducerID, WriterID, ChunkID}). This is used when applying
  //    out-of-band patches to the contents of the chunks after they have been
  //    copied into the TraceBuffer.
  // 2) Keep the chunks ordered by their ID. This is used when reading back.
  // 3) Keep metadata about the status of the chunk, e.g. whether the contents
  //    have been read already and should be skipped in a future read pass.
  // This struct should not have any field that is essential for reconstructing
  // the contents of the buffer from a crash dump.
  struct ChunkMeta {
    // Key used for sorting in the map.
    struct Key {
      Key(ProducerID p, WriterID w, ChunkID c)
          : producer_id{p}, writer_id{w}, chunk_id{c} {}

      explicit Key(const ChunkRecord& cr)
          : Key(cr.producer_id, cr.writer_id, cr.chunk_id) {}

      // Note that this sorting doesn't take into account the fact that ChunkID
      // will wrap over at some point. The extra logic in SequenceIterator
      // deals with that.
      bool operator<(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) <
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      bool operator==(const Key& other) const {
        return std::tie(producer_id, writer_id, chunk_id) ==
               std::tie(other.producer_id, other.writer_id, other.chunk_id);
      }

      bool operator!=(const Key& other) const { return !(*this == other); }

      // These fields should match at all times the corresponding fields in
      // the |chunk_record|. They are copied here purely for efficiency to
      // avoid dereferencing the buffer all the time.
      ProducerID producer_id;
      WriterID writer_id;
      ChunkID chunk_id;
    };

    enum IndexFlags : uint8_t {
      // If set, the chunk state was kChunkComplete at the time it was copied.
      // If unset, the chunk was still kChunkBeingWritten while copied. When
      // reading from the chunk's sequence, the sequence will not advance past
      // this chunk until this flag is set.
      kComplete = 1 << 0,

      // If set, we skipped the last packet that we read from this chunk, e.g.
      // because it was a continuation from a previous chunk that was dropped
      // or due to an ABI violation.
      kLastReadPacketSkipped = 1 << 1
    };

    ChunkMeta(ChunkRecord* r, uint16_t p, bool complete, uint8_t f, uid_t u)
        : chunk_record{r}, trusted_uid{u}, flags{f}, num_fragments{p} {
      if (complete)
        index_flags = kComplete;
    }

    bool is_complete() const { return index_flags & kComplete; }

    void set_complete(bool complete) {
      if (complete) {
        index_flags |= kComplete;
      } else {
        index_flags &= ~kComplete;
      }
    }

    bool last_read_packet_skipped() const {
      return index_flags & kLastReadPacketSkipped;
    }

    void set_last_read_packet_skipped(bool skipped) {
      if (skipped) {
        index_flags |= kLastReadPacketSkipped;
      } else {
        index_flags &= ~kLastReadPacketSkipped;
      }
    }

    ChunkRecord* const chunk_record;  // Addr of ChunkRecord within |data_|.
    const uid_t trusted_uid;          // uid of the producer.

    // Flags set by TraceBuffer to track the state of the chunk in the index.
    uint8_t index_flags = 0;

    // Correspond to |chunk_record->flags| and |chunk_record->num_fragments|.
    // Copied here for performance reasons (avoids having to dereference
    // |chunk_record| while iterating over ChunkMeta) and to aid debugging in
    // case the buffer gets corrupted.
    uint8_t flags = 0;           // See SharedMemoryABI::ChunkHeader::flags.
    uint16_t num_fragments = 0;  // Total number of packet fragments.

    uint16_t num_fragments_read = 0;  // Number of fragments already read.

    // The start offset of the next fragment (the |num_fragments_read|-th) to
    // be read. This is the offset in bytes from the beginning of the
    // ChunkRecord's payload (the 1st fragment starts at |chunk_record| +
    // sizeof(ChunkRecord)).
    uint16_t cur_fragment_offset = 0;
  };

  using ChunkMap = std::map<ChunkMeta::Key, ChunkMeta>;

  // Allows iterating over a sub-sequence of |index_| for all keys belonging
  // to the same {ProducerID, WriterID}. Furthermore takes into account the
  // wrapping of ChunkID. Instances are valid only as long as the |index_| is
  // not altered (can be used safely only between adjacent
  // ReadNextTracePacket() calls).
  // The iteration proceeds in the following order:
  // |wrapping_id| + 1 -> |seq_end|, |seq_begin| -> |wrapping_id|.
  // Practical example:
  // - Assume that kMaxChunkID == 7
  // - Assume that we have all 8 chunks in the range (0..7).
  // - Hence, |seq_begin| == c0, |seq_end| == c7
  // - Assume |wrapping_id| = 4 (c4 is the last chunk copied over
  //   through a CopyChunkUntrusted()).
  // The resulting iteration order will be: c5, c6, c7, c0, c1, c2, c3, c4.
  struct SequenceIterator {
    // Points to the 1st key (the one with the numerically min ChunkID).
    ChunkMap::iterator seq_begin;

    // Points one past the last key (the one with the numerically max ChunkID).
    ChunkMap::iterator seq_end;

    // Current iterator, always >= seq_begin && <= seq_end.
    ChunkMap::iterator cur;

    // The latest ChunkID written. Determines the start/end of the sequence.
    ChunkID wrapping_id;

    bool is_valid() const { return cur != seq_end; }

    ProducerID producer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.producer_id;
    }

    WriterID writer_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.writer_id;
    }

    ChunkID chunk_id() const {
      PERFETTO_DCHECK(is_valid());
      return cur->first.chunk_id;
    }

    ChunkMeta& operator*() {
      PERFETTO_DCHECK(is_valid());
      return cur->second;
    }

    // Moves |cur| to the next chunk in the index.
    // is_valid() will become false after calling this, if this was the last
    // entry of the sequence.
    void MoveNext();

    void MoveToEnd() { cur = seq_end; }
  };

  enum class ReadAheadResult {
    kSucceededReturnSlices,
    kFailedMoveToNextSequence,
    kFailedStayOnSameSequence,
  };

  enum class ReadPacketResult {
    kSucceeded,
    kFailedInvalidPacket,
    kFailedEmptyPacket,
  };

  explicit TraceBuffer(OverwritePolicy);
  TraceBuffer(const TraceBuffer&) = delete;
  TraceBuffer& operator=(const TraceBuffer&) = delete;

  bool Initialize(size_t size);

  // Returns an object that allows iterating over chunks in the |index_| that
  // have the same {ProducerID, WriterID} as
  // |seq_begin.first.{producer,writer}_id|. |seq_begin| must be an iterator
  // to the first entry in the |index_| that has a different
  // {ProducerID, WriterID} from the previous one. It is valid for |seq_begin|
  // to be == index_.end() (i.e. if the index is empty). The iteration takes
  // care of ChunkID wrapping, by using |last_chunk_id_written_|.
  SequenceIterator GetReadIterForSequence(ChunkMap::iterator seq_begin);

  // Used as a last resort when a buffer corruption is detected.
  void ClearContentsAndResetRWCursors();

  // Adds a padding record of the given size (must be a multiple of
  // sizeof(ChunkRecord)).
  void AddPaddingRecord(size_t);

  // Looks for contiguous fragments of the same packet starting from
  // |read_iter_|. If a contiguous packet is found, all the fragments are
  // pushed into TracePacket and the function returns kSucceededReturnSlices.
  // If not, the function returns either kFailedMoveToNextSequence or
  // kFailedStayOnSameSequence, telling the caller to continue looking for
  // packets.
  ReadAheadResult ReadAhead(TracePacket*);

  // Deletes (by marking the record invalid and removing it from the index)
  // all chunks from |wptr_| to |wptr_| + |bytes_to_clear|.
  // Returns:
  //   * The size of the gap left between the next valid Chunk and the end of
  //     the deletion range.
  //   * 0 if no next valid chunk exists (if the buffer is still zeroed).
  //   * -1 if |overwrite_policy_| == kDiscard and the deletion would cause
  //     unread chunks to be overwritten. In this case the buffer is left
  //     untouched.
  // Graphically, assume the initial situation is the following (|wptr_| = 10).
  //   |0        |10 (wptr_)         |30       |40                 |60
  //   +---------+-------------------+---------+-------------------+---------+
  //   | Chunk 1 |      Chunk 2      | Chunk 3 |      Chunk 4      | Chunk 5 |
  //   +---------+-------------------+---------+-------------------+---------+
  //             |_________Deletion range________|~~return value~~~|
  //
  // A call to DeleteNextChunksFor(32) will remove chunks 2, 3, 4 and return 18
  // (60 - 42), the distance between chunk 5 and the end of the deletion range.
  ssize_t DeleteNextChunksFor(size_t bytes_to_clear);

  // Decodes the boundaries of the next packet (or a fragment) pointed by
  // ChunkMeta and pushes that into |TracePacket|. It also increments the
  // |num_fragments_read| counter.
  // TracePacket can be nullptr, in which case the read state is still
  // advanced. When TracePacket is not nullptr, ProducerID must also be not
  // null and will be updated with the ProducerID that originally wrote the
  // chunk.
  ReadPacketResult ReadNextPacketInChunk(ChunkMeta*, TracePacket*);

  void DcheckIsAlignedAndWithinBounds(const uint8_t* ptr) const {
    PERFETTO_DCHECK(ptr >= begin() && ptr <= end() - sizeof(ChunkRecord));
    PERFETTO_DCHECK(
        (reinterpret_cast<uintptr_t>(ptr) & (alignof(ChunkRecord) - 1)) == 0);
  }

  ChunkRecord* GetChunkRecordAt(uint8_t* ptr) {
    DcheckIsAlignedAndWithinBounds(ptr);
    // We may be accessing a new (empty) record.
    data_.EnsureCommitted(
        static_cast<size_t>(ptr + sizeof(ChunkRecord) - begin()));
    return reinterpret_cast<ChunkRecord*>(ptr);
  }

  void DiscardWrite();

  // |src| can be nullptr (in which case |size| must be ==
  // record.size - sizeof(ChunkRecord)), for the case of writing a padding
  // record. |wptr_| is NOT advanced by this function, the caller must do that.
  void WriteChunkRecord(uint8_t* wptr,
                        const ChunkRecord& record,
                        const uint8_t* src,
                        size_t size) {
    // Note: |record.size| will be slightly bigger than |size| because of the
    // ChunkRecord header and rounding, to ensure that all ChunkRecord(s) are
    // multiple of sizeof(ChunkRecord). The invariant is:
    // record.size >= |size| + sizeof(ChunkRecord) (== if no rounding).
    PERFETTO_DCHECK(size <= ChunkRecord::kMaxSize);
    PERFETTO_DCHECK(record.size >= sizeof(record));
    PERFETTO_DCHECK(record.size % sizeof(record) == 0);
    PERFETTO_DCHECK(record.size >= size + sizeof(record));
    PERFETTO_CHECK(record.size <= size_to_end());
    DcheckIsAlignedAndWithinBounds(wptr);

    // We may be writing to this area for the first time.
    data_.EnsureCommitted(static_cast<size_t>(wptr + record.size - begin()));

    // Deliberately not a *D*CHECK.
    PERFETTO_CHECK(wptr + sizeof(record) + size <= end());
    memcpy(wptr, &record, sizeof(record));
    if (PERFETTO_LIKELY(src)) {
      // If the producer modifies the data in the shared memory buffer while
      // we are copying it to the central buffer, TSAN will (rightfully) flag
      // that as a race. However the entire purpose of copying the data into
      // the central buffer is that we can validate it without worrying that
      // the producer changes it from under our feet, so this race is benign.
      // The alternative would be to try computing which part of the buffer is
      // safe to read (assuming a well-behaving client), but the risk of
      // introducing a bug that way outweighs the benefit.
      PERFETTO_ANNOTATE_BENIGN_RACE_SIZED(
          src, size, "Benign race when copying chunk from shared memory.")
      memcpy(wptr + sizeof(record), src, size);
    } else {
      PERFETTO_DCHECK(size == record.size - sizeof(record));
    }
    const size_t rounding_size = record.size - sizeof(record) - size;
    memset(wptr + sizeof(record) + size, 0, rounding_size);
  }

  uint8_t* begin() const { return reinterpret_cast<uint8_t*>(data_.Get()); }
  uint8_t* end() const { return begin() + size_; }
  size_t size_to_end() const { return static_cast<size_t>(end() - wptr_); }

  base::PagedMemory data_;
  size_t size_ = 0;            // Size in bytes of |data_|.
  size_t max_chunk_size_ = 0;  // Max size in bytes allowed for a chunk.
  uint8_t* wptr_ = nullptr;    // Write pointer.

  // An index that keeps track of the positions and metadata of each
  // ChunkRecord.
  ChunkMap index_;

  // Read iterator used for ReadNextTracePacket(). It is reset by calling
  // BeginRead(). It becomes invalid after any call to methods that alter the
  // |index_|.
  SequenceIterator read_iter_;

  // See comments at the top of the file.
  OverwritePolicy overwrite_policy_ = kOverwrite;

  // Only used when |overwrite_policy_ == kDiscard|. This is set the first time
  // a write fails because it would overwrite unread chunks.
  bool discard_writes_ = false;

  // Keeps track of the highest ChunkID written for a given sequence, taking
  // into account a potential overflow of ChunkIDs. In the case of overflow,
  // stores the highest ChunkID written since the overflow.
  //
  // TODO(primiano): should clean up keys from this map. Right now it grows
  // without bounds (although realistically it is not a problem unless we have
  // too many producers/writers within the same trace session).
  std::map<std::pair<ProducerID, WriterID>, ChunkID> last_chunk_id_written_;

  // Statistics about buffer usage.
  TraceStats::BufferStats stats_;

#if PERFETTO_DCHECK_IS_ON()
  bool changed_since_last_read_ = false;
#endif

  // When true, disables some DCHECKs that have been put in place to detect
  // bugs in the producers. This is for tests that feed malicious inputs and
  // hence mimic a buggy producer.
  bool suppress_sanity_dchecks_for_testing_ = false;
};

}  // namespace perfetto

#endif  // SRC_TRACING_CORE_TRACE_BUFFER_H_