/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef INCLUDE_PERFETTO_TRACING_CORE_SHARED_MEMORY_ABI_H_
#define INCLUDE_PERFETTO_TRACING_CORE_SHARED_MEMORY_ABI_H_

#include <stddef.h>
#include <stdint.h>

#include <array>
#include <atomic>
#include <bitset>
#include <thread>
#include <type_traits>
#include <utility>

#include "perfetto/base/logging.h"

namespace perfetto {

// This file defines the binary interface of the memory buffers shared between
// Producer and Service. This is a long-term stable ABI and has to be backwards
// compatible to deal with mismatching Producer and Service versions.
//
// Overview
// --------
// SMB := "Shared Memory Buffer".
// In the most typical case of a multi-process architecture (i.e. Producer and
// Service are hosted by different processes), a Producer almost always means
// a "client process producing data" (almost: in some cases a process might
// host > 1 Producer, if it links two libraries, independent of each other,
// that both use Perfetto tracing).
// The Service has one SMB for each Producer.
// A producer has one or (typically) more data sources. They all share the
// same SMB.
// The SMB is a staging area to decouple data sources living in the Producer
// and allow them to do non-blocking async writes.
// The SMB is *not* the ultimate logging buffer seen by the Consumer. That one
// is larger (~MBs) and not shared with Producers.
// Each SMB is small, typically a few KB. Its size is configurable by the
// producer within a max limit of ~MB (see kMaxShmSize in service_impl.cc).
// The SMB is partitioned into fixed-size Page(s). The size of the Pages is
// determined by each Producer at connection time and cannot be changed.
// Hence, different producers can have SMB(s) with different Page sizes from
// each other, but the page size will be constant throughout the lifetime of
// each SMB.
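//
// For instance (illustrative numbers only): a Producer that connects with a
// 32 KB SMB and a 4 KB page size gets 32 KB / 4 KB == 8 pages, each of which
// can be partitioned into chunks independently of the other pages.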
// Page(s) are partitioned by the Producer into variable size Chunk(s):
//
//  +------------+      +--------------------------+
//  | Producer 1 | <->  |    SMB 1 [~32K - 1MB]    |
//  +------------+      +--------+--------+--------+
//                      |  Page  |  Page  |  Page  |
//                      +--------+--------+--------+
//                      | Chunk  |        | Chunk  |
//                      +--------+  Chunk +--------+ <----+
//                      | Chunk  |        | Chunk  |      |
//                      +--------+--------+--------+      +---------------------+
//                                                        |       Service       |
//  +------------+      +--------------------------+      +---------------------+
//  | Producer 2 | <->  |    SMB 2 [~32K - 1MB]    |     /| large ring buffers  |
//  +------------+      +--------+--------+--------+ <--+ | (100K - several MB) |
//                      |  Page  |  Page  |  Page  |      +---------------------+
//                      +--------+--------+--------+
//                      | Chunk  |        | Chunk  |
//                      +--------+  Chunk +--------+
//                      | Chunk  |        | Chunk  |
//                      +--------+--------+--------+
//
// * Sizes of both SMB and ring buffers are purely indicative and decided at
//   configuration time by the Producer (for SMB sizes) and the Consumer (for
//   the final ring buffer size).

// Page
// ----
// A page is a portion of the shared memory buffer and defines the granularity
// of the interaction between the Producer and tracing Service. When scanning
// the shared memory buffer to determine if something should be moved to the
// central logging buffers, the Service, most of the time, looks at and moves
// whole pages. Similarly, the Producer sends an IPC to invite the Service to
// drain the shared memory buffer only when a whole page is filled.
// Having fixed the total SMB size (hence the total memory overhead), the page
// size is a triangular tradeoff between:
// 1) IPC traffic: smaller pages -> more IPCs.
// 2) Producer lock freedom: larger pages -> larger chunks -> data sources can
//    write more data without needing to swap chunks and synchronize.
// 3) Risk of write-starving the SMB: larger pages -> higher chance that the
//    Service won't manage to drain them and the SMB remains full.
// The page size, on the other hand, has no implications on wasted memory due
// to fragmentation (see Chunk below).
// The size of the page is chosen by the Service at connection time and stays
// fixed throughout the lifetime of the Producer. Different producers (i.e.
// ~ different client processes) can use different page sizes.
// The page size must be an integer multiple of 4k (this is to allow VM page
// stealing optimizations) and obviously has to be an integer divisor of the
// total SMB size.

// Chunk
// -----
// A chunk is a portion of a Page which is written and handled by a Producer.
// A chunk contains a linear sequence of TracePacket(s) (the root proto).
// A chunk cannot be written concurrently by two data sources. Protobufs must
// be encoded as contiguous byte streams and cannot be interleaved. Therefore,
// on the Producer side, a chunk is almost always owned exclusively by one
// thread (% extremely peculiar slow-path cases).
// Chunks are essentially single-writer single-thread lock-free arenas. Locking
// happens only when a Chunk is full and a new one needs to be acquired.
// Locking happens only within the scope of a Producer process. There is no
// inter-process locking. The Producer cannot lock the Service and vice versa.
// In the worst case, either of the two can starve the SMB, by marking all
// chunks as either being read or written.
// But that has the only side effect of losing the trace data.
// The Producer can decide to partition each page into one of a limited number
// of configurations (e.g., 1 page == 1 chunk, 1 page == 2 chunks and so on).

// TracePacket
// -----------
// Is the atom of tracing. Putting aside pages and chunks, a trace is merely a
// sequence of TracePacket(s). TracePacket is the root protobuf message.
// A TracePacket can span across several chunks (hence even across several
// pages). A TracePacket can therefore be >> chunk size, >> page size and even
// >> SMB size. The Chunk header carries metadata to deal with the TracePacket
// splitting case.

// Use only explicitly-sized types below. DO NOT use size_t or any other
// architecture-dependent size in the struct fields. This buffer will be read
// and written by processes that have a different bitness in the same OS.
// Instead it's fine to assume little-endianness. Big-endian is a dream we are
// not currently pursuing.

class SharedMemoryABI {
 public:
  // This is due to Chunk::size being 16 bits.
  static constexpr size_t kMaxPageSize = 64 * 1024;

  // "14" is the max number that can be encoded in a 32 bit atomic word using
  // 2 state bits per Chunk and leaving 4 bits for the page layout.
  // See PageLayout below.
  static constexpr size_t kMaxChunksPerPage = 14;

  // Each TracePacket in the Chunk is prefixed by a 4 bytes redundant VarInt
  // (see proto_utils.h) stating its size.
  static constexpr size_t kPacketHeaderSize = 4;

  // Chunk states and transitions:
  //
  //    kChunkFree  <----------------+
  //         |  (Producer)           |
  //         V                       |
  //    kChunkBeingWritten           |
  //         |  (Producer)           |
  //         V                       |
  //    kChunkComplete               |
  //         |  (Service)            |
  //         V                       |
  //    kChunkBeingRead              |
  //         |  (Service)            |
  //         +-----------------------+
  enum ChunkState : uint32_t {
    // The Chunk is free. The Service shall never touch it, the Producer can
    // acquire it and transition it into kChunkBeingWritten.
    kChunkFree = 0,

    // The Chunk is being used by the Producer and is not complete yet.
    // The Service shall never touch kChunkBeingWritten pages.
    kChunkBeingWritten = 1,

    // The Service is moving the page into its non-shared ring buffer. The
    // Producer shall never touch kChunkBeingRead pages.
    kChunkBeingRead = 2,

    // The Producer is done writing the page and won't touch it again. The
    // Service can now move it to its non-shared ring buffer.
    // kAllChunksComplete relies on this being == 3.
    kChunkComplete = 3,
  };
  static constexpr const char* kChunkStateStr[] = {"Free", "BeingWritten",
                                                   "BeingRead", "Complete"};

  enum PageLayout : uint32_t {
    // The page is fully free and has not been partitioned yet.
    kPageNotPartitioned = 0,

    // TODO(primiano): Aligning a chunk @ 16 bytes could allow to use faster
    // intrinsics based on quad-word moves. Do the math and check what the
    // fragmentation loss is.

    // align4(X) := the largest integer N s.t. (N % 4) == 0 && N <= X.
    // 8 == sizeof(PageHeader).
    kPageDiv1 = 1,   // Only one chunk of size: PAGE_SIZE - 8.
    kPageDiv2 = 2,   // Two chunks of size: align4((PAGE_SIZE - 8) / 2).
    kPageDiv4 = 3,   // Four chunks of size: align4((PAGE_SIZE - 8) / 4).
    kPageDiv7 = 4,   // Seven chunks of size: align4((PAGE_SIZE - 8) / 7).
    kPageDiv14 = 5,  // Fourteen chunks of size: align4((PAGE_SIZE - 8) / 14).
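
    // As a worked example (illustrative math only, for the common
    // |page_size| == 4096): kPageDiv4 yields four chunks of
    // align4((4096 - 8) / 4) == align4(1022) == 1020 bytes each, leaving
    // 4096 - 8 - 4 * 1020 == 8 bytes of alignment/fragmentation loss.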

    // The rationale for 7 and 14 above is to maximize the page usage for the
    // likely case of |page_size| == 4096:
    // (((4096 - 8) / 14) % 4) == 0, while (((4096 - 8) / 16) % 4) == 3. So a
    // Div16 layout would waste 3 * 16 = 48 bytes per page for chunk alignment
    // gaps.

    kPageDivReserved1 = 6,
    kPageDivReserved2 = 7,
    kNumPageLayouts = 8,
  };

  // Keep this consistent with the PageLayout enum above.
  static constexpr uint32_t kNumChunksForLayout[] = {0, 1, 2, 4, 7, 14, 0, 0};

  // Layout of a Page.
  // +===================================================+
  // | Page header [8 bytes]                             |
  // | Tells how many chunks there are, how big they are |
  // | and their state (free, read, write, complete).    |
  // +===================================================+
  // +***************************************************+
  // | Chunk #0 header [8 bytes]                         |
  // | Tells how many packets there are and whether the  |
  // | 1st and last ones are fragmented.                 |
  // | Also has a chunk id to reassemble fragments.      |
  // +***************************************************+
  // +---------------------------------------------------+
  // | Packet #0 size [varint, up to 4 bytes]            |
  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
  // | Packet #0 payload                                 |
  // | A TracePacket protobuf message                    |
  // +---------------------------------------------------+
  //                        ...
  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
  // |      Optional padding to maintain alignment       |
  // + . . . . . . . . . . . . . . . . . . . . . . . . . +
  // +---------------------------------------------------+
  // | Packet #N size [varint, up to 4 bytes]            |
  // + - - - - - - - - - - - - - - - - - - - - - - - - - +
  // | Packet #N payload                                 |
  // | A TracePacket protobuf message                    |
  // +---------------------------------------------------+
  //                        ...
  // +***************************************************+
  // | Chunk #M header [8 bytes]                         |
  // ...

  // Alignment applies to start offset only. The Chunk size is *not* aligned.
  static constexpr uint32_t kChunkAlignment = 4;
  static constexpr uint32_t kChunkShift = 2;
  static constexpr uint32_t kChunkMask = 0x3;
  static constexpr uint32_t kLayoutMask = 0x70000000;
  static constexpr uint32_t kLayoutShift = 28;
  static constexpr uint32_t kAllChunksMask = 0x0FFFFFFF;

  // This assumes that kChunkComplete == 3.
  static constexpr uint32_t kAllChunksComplete = 0x0FFFFFFF;
  static constexpr uint32_t kAllChunksFree = 0;
  static constexpr size_t kInvalidPageIdx = static_cast<size_t>(-1);

  // There is one page header per page, at the beginning of the page.
  struct PageHeader {
    // |layout| bits:
    //   [31] [30:28] [27:26] ... [1:0]
    //    |      |       |     |    |
    //    |      |       |     |    +---------- ChunkState[0]
    //    |      |       |     +--------------- ChunkState[12..1]
    //    |      |       +--------------------- ChunkState[13]
    //    |      +----------------------------- PageLayout (0 == fully free)
    //    +------------------------------------ Reserved for future use
    std::atomic<uint32_t> layout;

    // If we are ever going to use this in the future, it might come in handy
    // to revive the kPageBeingPartitioned logic (look in the git log, it was
    // there at some point in the past).
    uint32_t reserved;
  };

  // There is one Chunk header per chunk (hence as many per page as the
  // PageLayout dictates), at the beginning of each chunk.
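  // For example (an illustrative scenario, not extra ABI surface): a
  // TracePacket split across two consecutive chunks of the same writer has
  // kLastPacketContinuesOnNextChunk set in the first chunk's header and
  // kFirstPacketContinuesFromPrevChunk set in the chunk with the next
  // |chunk_id|, which is what lets the Service glue the fragments together.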
  struct ChunkHeader {
    enum Flags : uint8_t {
      // If set, the first TracePacket in the chunk is partial and continues
      // from |chunk_id| - 1 (within the same |writer_id|).
      kFirstPacketContinuesFromPrevChunk = 1 << 0,

      // If set, the last TracePacket in the chunk is partial and continues on
      // |chunk_id| + 1 (within the same |writer_id|).
      kLastPacketContinuesOnNextChunk = 1 << 1,

      // The data in the chunk has holes (even if the chunk is marked as
      // kChunkComplete) that need to be patched out-of-band before the chunk
      // can be read.
      kChunkNeedsPatching = 1 << 2,
    };

    struct Packets {
      // Number of valid TracePacket protobuf messages contained in the chunk.
      // Each TracePacket is prefixed by its own size. This field is
      // monotonically updated by the Producer with release store semantics
      // after the packet has been written into the chunk.
      uint16_t count : 10;

      // See Flags above.
      uint16_t flags : 6;
    };

    // A monotonic counter of the chunk within the scope of a |writer_id|.
    // The tuple (ProducerID, WriterID, ChunkID) makes it possible to figure
    // out whether two chunks are contiguous (and hence a trace packet
    // spanning across them can be glued) or whether there are holes due to
    // the ring buffer wrapping.
    // This is set only when transitioning from kChunkFree to
    // kChunkBeingWritten and remains unchanged throughout the remaining
    // lifetime of the chunk.
    std::atomic<uint32_t> chunk_id;

    // ID of the writer, unique within the producer.
    // Like |chunk_id|, this is set only when transitioning from kChunkFree to
    // kChunkBeingWritten.
    std::atomic<uint16_t> writer_id;

    // There is no ProducerID here. The service figures that out from the IPC
    // channel, which is unspoofable.

    // Updated with release-store semantics.
    std::atomic<Packets> packets;
  };

  class Chunk {
   public:
    Chunk();  // Constructs an invalid chunk.

    // Chunk is move-only, to document the scope of the Acquire/Release
    // TryLock operations below.
    Chunk(const Chunk&) = delete;
    Chunk& operator=(const Chunk&) = delete;
    Chunk(Chunk&&) noexcept;
    Chunk& operator=(Chunk&&);

    uint8_t* begin() const { return begin_; }
    uint8_t* end() const { return begin_ + size_; }

    // Size, including the Chunk header.
    size_t size() const { return size_; }

    // Begin of the first packet (or packet fragment).
    uint8_t* payload_begin() const { return begin_ + sizeof(ChunkHeader); }
    size_t payload_size() const {
      PERFETTO_DCHECK(size_ >= sizeof(ChunkHeader));
      return size_ - sizeof(ChunkHeader);
    }

    bool is_valid() const { return begin_ && size_; }

    // Index of the chunk within the page [0..13] (13 comes from kPageDiv14).
    uint8_t chunk_idx() const { return chunk_idx_; }

    ChunkHeader* header() { return reinterpret_cast<ChunkHeader*>(begin_); }

    uint16_t writer_id() {
      return header()->writer_id.load(std::memory_order_relaxed);
    }

    // Returns the count of packets and the flags with acquire-load semantics.
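    //
    // A minimal read-side sketch (illustrative only; it assumes the caller
    // has already acquired the chunk, e.g. via TryAcquireChunkForReading()):
    //
    //   uint16_t num_packets;
    //   uint8_t flags;
    //   std::tie(num_packets, flags) = chunk.GetPacketCountAndFlags();
    //   if (flags & SharedMemoryABI::ChunkHeader::kChunkNeedsPatching) {
    //     // Out-of-band patches are still pending: don't parse it yet.
    //   }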
    std::pair<uint16_t, uint8_t> GetPacketCountAndFlags() {
      auto packets = header()->packets.load(std::memory_order_acquire);
      const uint16_t packets_count = packets.count;
      const uint8_t packets_flags = packets.flags;
      return std::make_pair(packets_count, packets_flags);
    }

    // Increases |packets.count| with release semantics (note, however, that
    // the packet count is incremented *before* starting to write a packet).
    // The increment is atomic but NOT race-free (i.e. no CAS). Only the
    // Producer is supposed to perform this increment, and it's supposed to do
    // that in a thread-safe way (holding a lock). A Chunk cannot be shared by
    // multiple Producer threads without locking. The packet count is cleared
    // by TryAcquireChunk(), when passing the new header for the chunk.
    void IncrementPacketCount() {
      ChunkHeader* chunk_header = header();
      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
      packets.count++;
      chunk_header->packets.store(packets, std::memory_order_release);
    }

    // Flags are cleared by TryAcquireChunk(), by passing the new header for
    // the chunk.
    void SetFlag(ChunkHeader::Flags flag) {
      ChunkHeader* chunk_header = header();
      auto packets = chunk_header->packets.load(std::memory_order_relaxed);
      packets.flags |= flag;
      chunk_header->packets.store(packets, std::memory_order_release);
    }

   private:
    friend class SharedMemoryABI;
    Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx);

    // Don't add extra fields, keep the move operator fast.
    uint8_t* begin_ = nullptr;
    uint16_t size_ = 0;
    uint8_t chunk_idx_ = 0;
  };

  // Constructs an instance from an existing shared memory buffer.
  SharedMemoryABI(uint8_t* start, size_t size, size_t page_size);
  SharedMemoryABI();

  void Initialize(uint8_t* start, size_t size, size_t page_size);

  uint8_t* start() const { return start_; }
  uint8_t* end() const { return start_ + size_; }
  size_t size() const { return size_; }
  size_t page_size() const { return page_size_; }
  size_t num_pages() const { return num_pages_; }
  bool is_valid() { return num_pages() > 0; }

  uint8_t* page_start(size_t page_idx) {
    PERFETTO_DCHECK(page_idx < num_pages_);
    return start_ + page_size_ * page_idx;
  }

  PageHeader* page_header(size_t page_idx) {
    return reinterpret_cast<PageHeader*>(page_start(page_idx));
  }

  // Returns true if the page is fully clear and has not been partitioned yet.
  // The state of the page can change at any point after this returns (or even
  // before). The Producer should use this only as a hint to decide whether it
  // should TryPartitionPage() or acquire an individual chunk.
  bool is_page_free(size_t page_idx) {
    return page_header(page_idx)->layout.load(std::memory_order_relaxed) == 0;
  }

  // Returns true if all chunks in the page are kChunkComplete. As above, this
  // is advisory only. The Service is supposed to use this only to decide
  // whether to TryAcquireAllChunksForReading() or not.
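  //
  // Worked example (illustrative math only): for a kPageDiv2 layout,
  // num_chunks == 2, so the mask below is (1 << (2 * kChunkShift)) - 1 ==
  // 0xF, and the page is complete only when both 2-bit chunk states equal
  // kChunkComplete (3), i.e. when (layout & kAllChunksMask) == 0xF.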
  bool is_page_complete(size_t page_idx) {
    auto layout =
        page_header(page_idx)->layout.load(std::memory_order_relaxed);
    const uint32_t num_chunks = GetNumChunksForLayout(layout);
    if (num_chunks == 0)
      return false;  // Non-partitioned pages cannot be complete.
    return (layout & kAllChunksMask) ==
           (kAllChunksComplete & ((1 << (num_chunks * kChunkShift)) - 1));
  }

  // For testing / debugging only.
  std::string page_header_dbg(size_t page_idx) {
    uint32_t x = page_header(page_idx)->layout.load(std::memory_order_relaxed);
    return std::bitset<32>(x).to_string();
  }

  // For testing / debugging only.
  uint32_t page_layout_dbg(size_t page_idx) {
    return page_header(page_idx)->layout.load(std::memory_order_relaxed);
  }

  // Returns a bitmap in which each bit is set if the corresponding Chunk
  // exists in the page (according to the page layout) and is free. If the
  // page is not partitioned it returns 0 (as if the page had no free chunks).
  uint32_t GetFreeChunks(size_t page_idx);

  // Tries to atomically partition a page with the given |layout|. Returns
  // true if the page was free and has been partitioned with the given
  // |layout|, false if the page wasn't free anymore by the time we got there.
  // If it succeeds, all the chunks are atomically set to the kChunkFree
  // state.
  bool TryPartitionPage(size_t page_idx, PageLayout layout);

  // Tries to atomically mark a single chunk within the page as
  // kChunkBeingWritten. Returns an invalid chunk if the page is not
  // partitioned or the chunk is not in the kChunkFree state. If it succeeds,
  // sets the chunk header to |header|.
  Chunk TryAcquireChunkForWriting(size_t page_idx,
                                  size_t chunk_idx,
                                  const ChunkHeader* header) {
    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingWritten, header);
  }

  // Similar to TryAcquireChunkForWriting. Fails if the chunk isn't in the
  // kChunkComplete state.
  Chunk TryAcquireChunkForReading(size_t page_idx, size_t chunk_idx) {
    return TryAcquireChunk(page_idx, chunk_idx, kChunkBeingRead, nullptr);
  }

  // Used by the Service to take full ownership of all the chunks in a page
  // in one shot. It tries to atomically migrate all chunks into the
  // kChunkBeingRead state. Can only be done if all chunks are either
  // kChunkFree or kChunkComplete. If this fails, the service has to fall back
  // to acquiring the chunks individually.
  bool TryAcquireAllChunksForReading(size_t page_idx);
  void ReleaseAllChunksAsFree(size_t page_idx);

  // The caller must have successfully TryAcquireAllChunksForReading() first.
  Chunk GetChunkUnchecked(size_t page_idx,
                          uint32_t page_layout,
                          size_t chunk_idx);

  // Puts a chunk into the kChunkComplete state. Returns the page index.
  size_t ReleaseChunkAsComplete(Chunk chunk) {
    return ReleaseChunk(std::move(chunk), kChunkComplete);
  }
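
  // End-to-end lifecycle sketch (illustrative only, assuming a single chunk
  // and a producer-side writer that already holds its own lock):
  //
  //   // Producer: grab a free chunk and write packets into it.
  //   SharedMemoryABI::ChunkHeader hdr{};  // chunk_id/writer_id filled in.
  //   SharedMemoryABI::Chunk chunk =
  //       abi.TryAcquireChunkForWriting(page_idx, chunk_idx, &hdr);
  //   if (chunk.is_valid()) {
  //     // ... write size-prefixed TracePackets at chunk.payload_begin(),
  //     // calling chunk.IncrementPacketCount() before each packet ...
  //     abi.ReleaseChunkAsComplete(std::move(chunk));
  //   }
  //
  //   // Service: drain the chunk and return it to the free state.
  //   SharedMemoryABI::Chunk rchunk =
  //       abi.TryAcquireChunkForReading(page_idx, chunk_idx);
  //   if (rchunk.is_valid()) {
  //     // ... copy the payload into the central ring buffer ...
  //     abi.ReleaseChunkAsFree(std::move(rchunk));
  //   }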
  // Puts a chunk into the kChunkFree state. Returns the page index.
  size_t ReleaseChunkAsFree(Chunk chunk) {
    return ReleaseChunk(std::move(chunk), kChunkFree);
  }

  ChunkState GetChunkState(size_t page_idx, size_t chunk_idx) {
    PageHeader* phdr = page_header(page_idx);
    uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
    return static_cast<ChunkState>((layout >> (chunk_idx * kChunkShift)) &
                                   kChunkMask);
  }

  std::pair<size_t, size_t> GetPageAndChunkIndex(const Chunk& chunk);

  static constexpr uint32_t GetNumChunksForLayout(uint32_t page_layout) {
    return kNumChunksForLayout[(page_layout & kLayoutMask) >> kLayoutShift];
  }

 private:
  SharedMemoryABI(const SharedMemoryABI&) = delete;
  SharedMemoryABI& operator=(const SharedMemoryABI&) = delete;

  uint16_t GetChunkSizeForLayout(uint32_t page_layout) const {
    return chunk_sizes_[(page_layout & kLayoutMask) >> kLayoutShift];
  }

  Chunk TryAcquireChunk(size_t page_idx,
                        size_t chunk_idx,
                        ChunkState,
                        const ChunkHeader*);
  size_t ReleaseChunk(Chunk chunk, ChunkState);

  uint8_t* start_ = nullptr;
  size_t size_ = 0;
  size_t page_size_ = 0;
  size_t num_pages_ = 0;
  std::array<uint16_t, kNumPageLayouts> chunk_sizes_;
};

}  // namespace perfetto

#endif  // INCLUDE_PERFETTO_TRACING_CORE_SHARED_MEMORY_ABI_H_