/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the
 * License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an "AS
 * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language
 * governing permissions and limitations under the License.
 */
#include "perfetto/ext/tracing/core/shared_memory_abi.h"

#include <array>
#include <atomic>
#include <limits>
#include <thread>
#include <tuple>
#include <utility>

#include "perfetto/base/build_config.h"
#include "perfetto/base/time.h"

#if !PERFETTO_BUILDFLAG(PERFETTO_OS_WIN)
#include <sys/mman.h>
#endif

#include "perfetto/ext/base/utils.h"
#include "perfetto/ext/tracing/core/basic_types.h"

namespace perfetto {

namespace {

constexpr int kRetryAttempts = 64;

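// Backoff policy for the compare-and-swap retry loops below: yield the CPU
// for the first half of the attempts, then sleep for progressively longer
// intervals.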
inline void WaitBeforeNextAttempt(int attempt) {
  if (attempt < kRetryAttempts / 2) {
    std::this_thread::yield();
  } else {
    base::SleepMicroseconds((unsigned(attempt) / 10) * 1000);
  }
}

// Returns the largest 4-byte-aligned chunk size <= |page_size| / |divider|
// for each divider in PageLayout.
constexpr size_t GetChunkSize(size_t page_size, size_t divider) {
  return ((page_size - sizeof(SharedMemoryABI::PageHeader)) / divider) & ~3UL;
}

// Initializer for the const |chunk_sizes_| array.
std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> InitChunkSizes(
    size_t page_size) {
  static_assert(SharedMemoryABI::kNumPageLayouts ==
                    base::ArraySize(SharedMemoryABI::kNumChunksForLayout),
                "kNumPageLayouts out of date");
  std::array<uint16_t, SharedMemoryABI::kNumPageLayouts> res = {};
  for (size_t i = 0; i < SharedMemoryABI::kNumPageLayouts; i++) {
    size_t num_chunks = SharedMemoryABI::kNumChunksForLayout[i];
    size_t size = num_chunks == 0 ? 0 : GetChunkSize(page_size, num_chunks);
    PERFETTO_CHECK(size <= std::numeric_limits<uint16_t>::max());
    res[i] = static_cast<uint16_t>(size);
  }
  return res;
}

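// Resets a chunk header to the zero-initialized "free" state. |packets| is
// stored last with release semantics, so a reader that observes the cleared
// |packets| word also observes the cleared |writer_id| and |chunk_id|.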
inline void ClearChunkHeader(SharedMemoryABI::ChunkHeader* header) {
  header->writer_id.store(0u, std::memory_order_relaxed);
  header->chunk_id.store(0u, std::memory_order_relaxed);
  header->packets.store({}, std::memory_order_release);
}

}  // namespace

// static
constexpr uint32_t SharedMemoryABI::kNumChunksForLayout[];
constexpr const char* SharedMemoryABI::kChunkStateStr[];
constexpr const size_t SharedMemoryABI::kInvalidPageIdx;
constexpr const size_t SharedMemoryABI::kMinPageSize;
constexpr const size_t SharedMemoryABI::kMaxPageSize;
constexpr const size_t SharedMemoryABI::kPacketSizeDropPacket;

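// Illustrative lifecycle of a chunk (a sketch, not a compilable test: it
// assumes |smb_start| and |smb_size| describe an already-mapped SMB region
// and that the caller can reach these methods):
//
//   SharedMemoryABI abi(smb_start, smb_size, 4096 /* page_size */);
//   if (abi.TryPartitionPage(/*page_idx=*/0, SharedMemoryABI::kPageDiv1)) {
//     SharedMemoryABI::ChunkHeader hdr{};  // writer_id/chunk_id set by caller.
//     SharedMemoryABI::Chunk chunk = abi.TryAcquireChunk(
//         0, /*chunk_idx=*/0, SharedMemoryABI::kChunkBeingWritten, &hdr);
//     if (chunk.is_valid())
//       abi.ReleaseChunk(std::move(chunk), SharedMemoryABI::kChunkComplete);
//   }
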
SharedMemoryABI::SharedMemoryABI() = default;

SharedMemoryABI::SharedMemoryABI(uint8_t* start,
                                 size_t size,
                                 size_t page_size) {
  Initialize(start, size, page_size);
}

void SharedMemoryABI::Initialize(uint8_t* start,
                                 size_t size,
                                 size_t page_size) {
  start_ = start;
  size_ = size;
  page_size_ = page_size;
  num_pages_ = size / page_size;
  chunk_sizes_ = InitChunkSizes(page_size);
  static_assert(sizeof(PageHeader) == 8, "PageHeader size");
  static_assert(sizeof(ChunkHeader) == 8, "ChunkHeader size");
  static_assert(sizeof(ChunkHeader::chunk_id) == sizeof(ChunkID),
                "ChunkID size");

  static_assert(sizeof(ChunkHeader::Packets) == 2, "ChunkHeader::Packets size");
  static_assert(alignof(ChunkHeader) == kChunkAlignment,
                "ChunkHeader alignment");

  // In theory std::atomic does not guarantee that the underlying type
  // consists only of the actual atomic word. Theoretically it could have
  // locks or other state. In practice most implementations just implement
  // them without extra state. The code below overlays the atomic into the
  // SMB, hence relies on this implementation detail. This should be fine
  // pragmatically (Chrome's base makes the same assumption), but let's have a
  // check for this.
  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t) &&
                    sizeof(std::atomic<uint16_t>) == sizeof(uint16_t),
                "Incompatible STL <atomic> implementation");

  // Check that kAllChunksComplete and kAllChunksFree are consistent with the
  // ChunkState enum values.

  // These must be zero because we rely on zero-initialized memory being
  // interpreted as "free".
  static_assert(kChunkFree == 0 && kAllChunksFree == 0,
                "kChunkFree/kAllChunksFree must be 0");

  static_assert((kAllChunksComplete & kChunkMask) == kChunkComplete,
                "kAllChunksComplete out of sync with kChunkComplete");

  // Check the consistency of the kMax... constants.
  static_assert(sizeof(ChunkHeader::writer_id) == sizeof(WriterID),
                "WriterID size");
  ChunkHeader chunk_header{};
  chunk_header.chunk_id.store(static_cast<uint32_t>(-1));
  PERFETTO_CHECK(chunk_header.chunk_id.load() == kMaxChunkID);

  chunk_header.writer_id.store(static_cast<uint16_t>(-1));
  PERFETTO_CHECK(kMaxWriterID <= chunk_header.writer_id.load());

  PERFETTO_CHECK(page_size >= kMinPageSize);
  PERFETTO_CHECK(page_size <= kMaxPageSize);
  PERFETTO_CHECK(page_size % kMinPageSize == 0);
  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(start) % kMinPageSize == 0);
  PERFETTO_CHECK(size % page_size == 0);
}

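// Returns the chunk at |chunk_idx| within the page, trusting the
// caller-supplied |page_layout| instead of re-reading the page header (hence
// "Unchecked"). Callers such as TryAcquireChunk() pass a layout obtained from
// a prior atomic load.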
SharedMemoryABI::Chunk SharedMemoryABI::GetChunkUnchecked(size_t page_idx,
                                                          uint32_t page_layout,
                                                          size_t chunk_idx) {
  const size_t num_chunks = GetNumChunksForLayout(page_layout);
  PERFETTO_DCHECK(chunk_idx < num_chunks);
  // Compute the chunk virtual address and write it into |chunk|.
  const uint16_t chunk_size = GetChunkSizeForLayout(page_layout);
  size_t chunk_offset_in_page = sizeof(PageHeader) + chunk_idx * chunk_size;

  Chunk chunk(page_start(page_idx) + chunk_offset_in_page, chunk_size,
              static_cast<uint8_t>(chunk_idx));
  PERFETTO_DCHECK(chunk.end() <= end());
  return chunk;
}

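// Attempts to atomically transition the chunk into |desired_chunk_state| by
// retrying a compare-and-swap on the page-layout word. On success, when
// acquiring for writing, the caller-provided |header| is copied into the
// chunk. Returns an invalid Chunk if the transition is not allowed or the CAS
// keeps failing after kRetryAttempts.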
SharedMemoryABI::Chunk SharedMemoryABI::TryAcquireChunk(
    size_t page_idx,
    size_t chunk_idx,
    ChunkState desired_chunk_state,
    const ChunkHeader* header) {
  PERFETTO_DCHECK(desired_chunk_state == kChunkBeingRead ||
                  desired_chunk_state == kChunkBeingWritten);
  PageHeader* phdr = page_header(page_idx);
  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
    uint32_t layout = phdr->layout.load(std::memory_order_acquire);
    const size_t num_chunks = GetNumChunksForLayout(layout);

    // The page layout has changed (or the page is free).
    if (chunk_idx >= num_chunks)
      return Chunk();

    // Verify that the chunk is still in a state that allows the transition to
    // |desired_chunk_state|. The only allowed transitions are:
    // 1. kChunkFree -> kChunkBeingWritten (Producer).
    // 2. kChunkComplete -> kChunkBeingRead (Service).
    ChunkState expected_chunk_state =
        desired_chunk_state == kChunkBeingWritten ? kChunkFree : kChunkComplete;
    auto cur_chunk_state = (layout >> (chunk_idx * kChunkShift)) & kChunkMask;
    if (cur_chunk_state != expected_chunk_state)
      return Chunk();

    uint32_t next_layout = layout;
    next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
    next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));
    if (phdr->layout.compare_exchange_strong(layout, next_layout,
                                             std::memory_order_acq_rel)) {
      // Compute the chunk virtual address and write it into |chunk|.
      Chunk chunk = GetChunkUnchecked(page_idx, layout, chunk_idx);
      if (desired_chunk_state == kChunkBeingWritten) {
        PERFETTO_DCHECK(header);
        ChunkHeader* new_header = chunk.header();
        new_header->writer_id.store(header->writer_id,
                                    std::memory_order_relaxed);
        new_header->chunk_id.store(header->chunk_id, std::memory_order_relaxed);
        new_header->packets.store(header->packets, std::memory_order_release);
      }
      return chunk;
    }
    WaitBeforeNextAttempt(attempt);
  }
  return Chunk();  // All our attempts failed.
}

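// Partitions a free page into the chunks dictated by |layout| with a single
// compare-and-swap. Fails (returns false) if the page is not currently free,
// i.e. its layout word is not zero.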
bool SharedMemoryABI::TryPartitionPage(size_t page_idx, PageLayout layout) {
  PERFETTO_DCHECK(layout >= kPageDiv1 && layout <= kPageDiv14);
  uint32_t expected_layout = 0;  // Free page.
  uint32_t next_layout = (layout << kLayoutShift) & kLayoutMask;
  PageHeader* phdr = page_header(page_idx);
  if (!phdr->layout.compare_exchange_strong(expected_layout, next_layout,
                                            std::memory_order_acq_rel)) {
    return false;
  }
  return true;
}

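// Returns a bitmap of the free chunks in the page: bit i is set iff the i-th
// chunk is in the kChunkFree state.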
uint32_t SharedMemoryABI::GetFreeChunks(size_t page_idx) {
  uint32_t layout =
      page_header(page_idx)->layout.load(std::memory_order_relaxed);
  const uint32_t num_chunks = GetNumChunksForLayout(layout);
  uint32_t res = 0;
  for (uint32_t i = 0; i < num_chunks; i++) {
    res |= ((layout & kChunkMask) == kChunkFree) ? (1 << i) : 0;
    layout >>= kChunkShift;
  }
  return res;
}

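// Transitions |chunk| to |desired_chunk_state| (kChunkComplete on the
// producer side, kChunkFree on the service side) via a CAS retry loop on the
// page-layout word, de-partitioning the page if every chunk becomes free.
// Returns the page index on success, or kInvalidPageIdx if the page is still
// contended after kRetryAttempts.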
size_t SharedMemoryABI::ReleaseChunk(Chunk chunk,
                                     ChunkState desired_chunk_state) {
  PERFETTO_DCHECK(desired_chunk_state == kChunkComplete ||
                  desired_chunk_state == kChunkFree);

  size_t page_idx;
  size_t chunk_idx;
  std::tie(page_idx, chunk_idx) = GetPageAndChunkIndex(chunk);

  // Reset header fields, so that the service can identify when the chunk's
  // header has been initialized by the producer.
  if (desired_chunk_state == kChunkFree)
    ClearChunkHeader(chunk.header());

  for (int attempt = 0; attempt < kRetryAttempts; attempt++) {
    PageHeader* phdr = page_header(page_idx);
    uint32_t layout = phdr->layout.load(std::memory_order_relaxed);
    const size_t page_chunk_size = GetChunkSizeForLayout(layout);

    // TODO(primiano): this should not be a CHECK, because a malicious producer
    // could crash us by putting the chunk in an invalid state. This should
    // gracefully fail. Keep a CHECK until then.
    PERFETTO_CHECK(chunk.size() == page_chunk_size);
    const uint32_t chunk_state =
        ((layout >> (chunk_idx * kChunkShift)) & kChunkMask);

    // Verify that the chunk is still in a state that allows the transition to
    // |desired_chunk_state|. The only allowed transitions are:
    // 1. kChunkBeingWritten -> kChunkComplete (Producer).
    // 2. kChunkBeingRead -> kChunkFree (Service).
    ChunkState expected_chunk_state;
    if (desired_chunk_state == kChunkComplete) {
      expected_chunk_state = kChunkBeingWritten;
    } else {
      expected_chunk_state = kChunkBeingRead;
    }

    // TODO(primiano): should not be a CHECK (same rationale as the comment
    // above).
    PERFETTO_CHECK(chunk_state == expected_chunk_state);
    uint32_t next_layout = layout;
    next_layout &= ~(kChunkMask << (chunk_idx * kChunkShift));
    next_layout |= (desired_chunk_state << (chunk_idx * kChunkShift));

    // If we are freeing a chunk and all the other chunks in the page are free
    // we should de-partition the page and mark it as clear.
    if ((next_layout & kAllChunksMask) == kAllChunksFree)
      next_layout = 0;

    if (phdr->layout.compare_exchange_strong(layout, next_layout,
                                             std::memory_order_acq_rel)) {
      return page_idx;
    }
    WaitBeforeNextAttempt(attempt);
  }
  // Too much contention on this page. Give up. This page will be left pending
  // forever, but there isn't much more we can do at this point.
  PERFETTO_DFATAL("Too much contention on page.");
  return kInvalidPageIdx;
}

SharedMemoryABI::Chunk::Chunk() = default;

SharedMemoryABI::Chunk::Chunk(uint8_t* begin, uint16_t size, uint8_t chunk_idx)
    : begin_(begin), size_(size), chunk_idx_(chunk_idx) {
  PERFETTO_CHECK(reinterpret_cast<uintptr_t>(begin) % kChunkAlignment == 0);
  PERFETTO_CHECK(size > 0);
}

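// Chunks are moved rather than copied: moving transfers the chunk pointer and
// resets the source back to the invalid (default-constructed) state.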
SharedMemoryABI::Chunk::Chunk(Chunk&& o) noexcept {
  *this = std::move(o);
}

SharedMemoryABI::Chunk& SharedMemoryABI::Chunk::operator=(Chunk&& o) {
  begin_ = o.begin_;
  size_ = o.size_;
  chunk_idx_ = o.chunk_idx_;
  o.begin_ = nullptr;
  o.size_ = 0;
  o.chunk_idx_ = 0;
  return *this;
}

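// Maps a Chunk back to its (page index, chunk index) pair via arithmetic on
// the chunk's offset from |start_|.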
std::pair<size_t, size_t> SharedMemoryABI::GetPageAndChunkIndex(
    const Chunk& chunk) {
  PERFETTO_DCHECK(chunk.is_valid());
  PERFETTO_DCHECK(chunk.begin() >= start_);
  PERFETTO_DCHECK(chunk.end() <= start_ + size_);

  // TODO(primiano): The divisions below could be avoided if we cached
  // |page_shift_|.
  const uintptr_t rel_addr = static_cast<uintptr_t>(chunk.begin() - start_);
  const size_t page_idx = rel_addr / page_size_;
  const size_t offset = rel_addr % page_size_;
  PERFETTO_DCHECK(offset >= sizeof(PageHeader));
  PERFETTO_DCHECK(offset % kChunkAlignment == 0);
  PERFETTO_DCHECK((offset - sizeof(PageHeader)) % chunk.size() == 0);
  const size_t chunk_idx = (offset - sizeof(PageHeader)) / chunk.size();
  PERFETTO_DCHECK(chunk_idx < kMaxChunksPerPage);
  PERFETTO_DCHECK(chunk_idx < GetNumChunksForLayout(GetPageLayout(page_idx)));
  return std::make_pair(page_idx, chunk_idx);
}

}  // namespace perfetto
334