• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2019 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #include "src/profiling/memory/shared_ring_buffer.h"
18 
19 #include <atomic>
20 #include <type_traits>
21 
22 #include <errno.h>
23 #include <fcntl.h>
24 #include <inttypes.h>
25 #include <sys/mman.h>
26 #include <sys/stat.h>
27 #include <unistd.h>
28 
29 #include "perfetto/base/build_config.h"
30 #include "perfetto/ext/base/scoped_file.h"
31 #include "perfetto/ext/base/temp_file.h"
32 #include "src/profiling/memory/scoped_spinlock.h"
33 
34 #if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
35 #include <linux/memfd.h>
36 #include <sys/syscall.h>
37 #endif
38 
39 namespace perfetto {
40 namespace profiling {
41 
namespace {

// Size of the metadata header page that precedes the ring contents.
constexpr auto kMetaPageSize = base::kPageSize;
constexpr auto kAlignment = 8;  // 64 bits to use aligned memcpy().
// Every record is prefixed by an 8-byte (one kAlignment unit) size header.
constexpr auto kHeaderSize = kAlignment;
// Red zone mapped PROT_NONE after the buffer mappings. 64 MB assuming 4 KiB
// pages.
constexpr auto kGuardSize = base::kPageSize * 1024 * 16;  // 64 MB.
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
// Seals applied to the memfd: forbid resizing the file and forbid removing
// the seals themselves.
constexpr auto kFDSeals = F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_SEAL;
#endif

}  // namespace
53 
54 
// Creates a fresh ring buffer of |size| payload bytes backed by a new fd.
// On any failure the object is left in an !is_valid() state (checked by
// Create()) rather than crashing, except where PERFETTO_CHECK applies.
SharedRingBuffer::SharedRingBuffer(CreateFlag, size_t size) {
  // One extra page up front holds the MetadataPage header.
  size_t size_with_meta = size + kMetaPageSize;
  base::ScopedFile fd;
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  bool is_memfd = false;
  // Prefer a sealable memfd so the buffer cannot be shrunk/grown under us
  // once kFDSeals are applied below.
  fd.reset(static_cast<int>(syscall(__NR_memfd_create, "heapprofd_ringbuf",
                                    MFD_CLOEXEC | MFD_ALLOW_SEALING)));
  is_memfd = !!fd;

  if (!fd) {
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
    // In-tree builds only allow mem_fd, so we can inspect the seals to verify
    // the fd is appropriately sealed.
    PERFETTO_ELOG("memfd_create() failed");
    return;
#else
    PERFETTO_DPLOG("memfd_create() failed");
#endif
  }
#endif

  // Fallback (non-Android, or out-of-tree Android where memfd_create
  // failed): an unlinked temp file backs the shared memory.
  if (!fd)
    fd = base::TempFile::CreateUnlinked().ReleaseFD();

  PERFETTO_CHECK(fd);
  int res = ftruncate(fd.get(), static_cast<off_t>(size_with_meta));
  PERFETTO_CHECK(res == 0);
#if PERFETTO_BUILDFLAG(PERFETTO_OS_ANDROID)
  if (is_memfd) {
    // Seal only after sizing the file: F_SEAL_SHRINK/GROW would forbid the
    // ftruncate above.
    res = fcntl(*fd, F_ADD_SEALS, kFDSeals);
    if (res != 0) {
      PERFETTO_PLOG("Failed to seal FD.");
      return;  // Leaves the object !is_valid().
    }
  }
#endif
  Initialize(std::move(fd));
  if (!is_valid())
    return;

  // Placement-new into the freshly mapped (zeroed) region; MetadataPage is
  // trivially constructible (static_asserted in the destructor).
  new (meta_) MetadataPage();
}
97 
~SharedRingBuffer()98 SharedRingBuffer::~SharedRingBuffer() {
99   static_assert(std::is_trivially_constructible<MetadataPage>::value,
100                 "MetadataPage must be trivially constructible");
101   static_assert(std::is_trivially_destructible<MetadataPage>::value,
102                 "MetadataPage must be trivially destructible");
103 
104   if (is_valid()) {
105     size_t outer_size = kMetaPageSize + size_ * 2 + kGuardSize;
106     munmap(meta_, outer_size);
107   }
108 
109   // This is work-around for code like the following:
110   // https://android.googlesource.com/platform/libcore/+/4ecb71f94378716f88703b9f7548b5d24839262f/ojluni/src/main/native/UNIXProcess_md.c#427
111   // They fork, close all fds by iterating over /proc/self/fd using opendir.
112   // Unfortunately closedir calls free, which detects the fork, and then tries
113   // to destruct the Client that holds this SharedRingBuffer.
114   //
115   // ScopedResource crashes on failure to close, so we explicitly ignore
116   // failures here.
117   int fd = mem_fd_.release();
118   if (fd != -1)
119     close(fd);
120 }
121 
// Validates |mem_fd| and maps it into this process. On success sets meta_,
// mem_, size_ and takes ownership of the fd; on any failure returns early,
// leaving the object !is_valid().
void SharedRingBuffer::Initialize(base::ScopedFile mem_fd) {
#if PERFETTO_BUILDFLAG(PERFETTO_ANDROID_BUILD)
  // In-tree builds only accept sealed memfds: verify all expected seals are
  // present before trusting the fd's size.
  int seals = fcntl(*mem_fd, F_GET_SEALS);
  if (seals == -1) {
    PERFETTO_PLOG("Failed to get seals of FD.");
    return;
  }
  if ((seals & kFDSeals) != kFDSeals) {
    PERFETTO_ELOG("FD not properly sealed. Expected %x, got %x", kFDSeals,
                  seals);
    return;
  }
#endif

  // Derive the buffer size from the fd itself (callers may be attaching to
  // an fd created by another process).
  struct stat stat_buf = {};
  int res = fstat(*mem_fd, &stat_buf);
  if (res != 0 || stat_buf.st_size == 0) {
    PERFETTO_PLOG("Could not attach to fd.");
    return;
  }
  auto size_with_meta = static_cast<size_t>(stat_buf.st_size);
  auto size = size_with_meta - kMetaPageSize;

  // |size_with_meta| must be a power of two number of pages + 1 page (for
  // metadata).
  if (size_with_meta < 2 * base::kPageSize || size % base::kPageSize ||
      (size & (size - 1))) {
    PERFETTO_ELOG("SharedRingBuffer size is invalid (%zu)", size_with_meta);
    return;
  }

  // First of all reserve the whole virtual region to fit the buffer twice
  // + metadata page + red zone at the end.
  size_t outer_size = kMetaPageSize + size * 2 + kGuardSize;
  uint8_t* region = reinterpret_cast<uint8_t*>(
      mmap(nullptr, outer_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (region == MAP_FAILED) {
    PERFETTO_PLOG("mmap(PROT_NONE) failed");
    return;
  }

  // Map first the whole buffer (including the initial metadata page) @ off=0.
  void* reg1 = mmap(region, size_with_meta, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, *mem_fd, 0);

  // Then map again the buffer, skipping the metadata page. The final result is:
  // [ METADATA ] [ RING BUFFER SHMEM ] [ RING BUFFER SHMEM ]
  // The mirrored second mapping lets readers/writers touch a record that
  // wraps around the end of the buffer with a single linear memcpy.
  void* reg2 = mmap(region + size_with_meta, size, PROT_READ | PROT_WRITE,
                    MAP_SHARED | MAP_FIXED, *mem_fd,
                    /*offset=*/kMetaPageSize);

  if (reg1 != region || reg2 != region + size_with_meta) {
    PERFETTO_PLOG("mmap(MAP_SHARED) failed");
    // Releases the whole reservation, including whichever MAP_FIXED mapping
    // (if any) did succeed.
    munmap(region, outer_size);
    return;
  }
  set_size(size);
  meta_ = reinterpret_cast<MetadataPage*>(region);
  mem_ = region + kMetaPageSize;
  mem_fd_ = std::move(mem_fd);
}
183 
// Reserves space for a |size|-byte record. Must be called with |spinlock|
// held. Returns an invalid Buffer and sets errno on failure:
//   EBADF  - ring buffer pointers are corrupt,
//   EINVAL - |size| + header overflows uint64,
//   EAGAIN - not enough free space.
// On success the record is committed to readers only once EndWrite() stores
// the nonzero size header.
SharedRingBuffer::Buffer SharedRingBuffer::BeginWrite(
    const ScopedSpinlock& spinlock,
    size_t size) {
  PERFETTO_DCHECK(spinlock.locked());
  Buffer result;

  base::Optional<PointerPositions> opt_pos = GetPointerPositions();
  if (!opt_pos) {
    meta_->stats.num_writes_corrupt++;
    errno = EBADF;
    return result;
  }
  auto pos = opt_pos.value();

  // Total record footprint: size header + payload, rounded up to 8 bytes.
  const uint64_t size_with_header =
      base::AlignUp<kAlignment>(size + kHeaderSize);

  // size_with_header < size is for catching overflow of size_with_header.
  if (PERFETTO_UNLIKELY(size_with_header < size)) {
    errno = EINVAL;
    return result;
  }

  if (size_with_header > write_avail(pos)) {
    meta_->stats.num_writes_overflow++;
    errno = EAGAIN;
    return result;
  }

  uint8_t* wr_ptr = at(pos.write_pos);

  result.size = size;
  result.data = wr_ptr + kHeaderSize;
  result.bytes_free = write_avail(pos);
  meta_->stats.bytes_written += size;
  meta_->stats.num_writes_succeeded++;

  // Zero the size header so the reader sees "not yet committed" until
  // EndWrite stores the real size.
  // We can make this a relaxed store, as this gets picked up by the acquire
  // load in GetPointerPositions (and the release store below).
  reinterpret_cast<std::atomic<uint32_t>*>(wr_ptr)->store(
      0, std::memory_order_relaxed);

  // This needs to happen after the store above, so the reader never observes an
  // incorrect byte count. This is matched by the acquire load in
  // GetPointerPositions.
  meta_->write_pos.fetch_add(size_with_header, std::memory_order_release);
  return result;
}
232 
EndWrite(Buffer buf)233 void SharedRingBuffer::EndWrite(Buffer buf) {
234   if (!buf)
235     return;
236   uint8_t* wr_ptr = buf.data - kHeaderSize;
237   PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(wr_ptr) % kAlignment == 0);
238 
239   // This needs to release to make sure the reader sees the payload written
240   // between the BeginWrite and EndWrite calls.
241   //
242   // This is matched by the acquire load in BeginRead where it reads the
243   // record's size.
244   reinterpret_cast<std::atomic<uint32_t>*>(wr_ptr)->store(
245       static_cast<uint32_t>(buf.size), std::memory_order_release);
246 }
247 
// Returns the next readable record, or an invalid Buffer with errno set:
//   EBADF  - pointers corrupt or record size exceeds readable bytes,
//   EAGAIN - no committed data available yet.
// A returned record must be consumed and released with EndRead().
SharedRingBuffer::Buffer SharedRingBuffer::BeginRead() {
  base::Optional<PointerPositions> opt_pos = GetPointerPositions();
  if (!opt_pos) {
    meta_->stats.num_reads_corrupt++;
    errno = EBADF;
    return Buffer();
  }
  auto pos = opt_pos.value();

  size_t avail_read = read_avail(pos);

  // Not even a full size header is readable yet.
  if (avail_read < kHeaderSize) {
    meta_->stats.num_reads_nodata++;
    errno = EAGAIN;
    return Buffer();  // No data
  }

  uint8_t* rd_ptr = at(pos.read_pos);
  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);
  // Acquire pairs with the release store in EndWrite: once a nonzero size is
  // observed, the payload bytes are visible too.
  const size_t size = reinterpret_cast<std::atomic<uint32_t>*>(rd_ptr)->load(
      std::memory_order_acquire);
  if (size == 0) {
    // Header is zeroed: the writer reserved the record (BeginWrite) but has
    // not committed it yet (EndWrite).
    meta_->stats.num_reads_nodata++;
    errno = EAGAIN;
    return Buffer();
  }
  const size_t size_with_header = base::AlignUp<kAlignment>(size + kHeaderSize);

  // A record claiming more bytes than are readable means the header (or the
  // pointers) got corrupted.
  if (size_with_header > avail_read) {
    PERFETTO_ELOG(
        "Corrupted header detected, size=%zu"
        ", read_avail=%zu, rd=%" PRIu64 ", wr=%" PRIu64,
        size, avail_read, pos.read_pos, pos.write_pos);
    meta_->stats.num_reads_corrupt++;
    errno = EBADF;
    return Buffer();
  }

  rd_ptr += kHeaderSize;
  PERFETTO_DCHECK(reinterpret_cast<uintptr_t>(rd_ptr) % kAlignment == 0);
  return Buffer(rd_ptr, size, write_avail(pos));
}
290 
EndRead(Buffer buf)291 void SharedRingBuffer::EndRead(Buffer buf) {
292   if (!buf)
293     return;
294   size_t size_with_header = base::AlignUp<kAlignment>(buf.size + kHeaderSize);
295   meta_->read_pos.fetch_add(size_with_header, std::memory_order_relaxed);
296   meta_->stats.num_reads_succeeded++;
297 }
298 
IsCorrupt(const PointerPositions & pos)299 bool SharedRingBuffer::IsCorrupt(const PointerPositions& pos) {
300   if (pos.write_pos < pos.read_pos || pos.write_pos - pos.read_pos > size_ ||
301       pos.write_pos % kAlignment || pos.read_pos % kAlignment) {
302     PERFETTO_ELOG("Ring buffer corrupted, rd=%" PRIu64 ", wr=%" PRIu64
303                   ", size=%zu",
304                   pos.read_pos, pos.write_pos, size_);
305     return true;
306   }
307   return false;
308 }
309 
SharedRingBuffer::SharedRingBuffer(SharedRingBuffer&& other) noexcept {
  // Delegate to move-assignment, which transfers the fd and mapping fields
  // and nulls out |other| so its destructor is a no-op on them.
  *this = std::move(other);
}
313 
SharedRingBuffer& SharedRingBuffer::operator=(
    SharedRingBuffer&& other) noexcept {
  // Steal the fd and mapping-related fields, then reset |other| so its
  // destructor does not munmap/close the transferred resources.
  // NOTE(review): any mapping previously owned by *this is NOT munmap'd
  // here; presumably move-assignment only targets default-constructed or
  // moved-from instances — confirm against the header/call sites.
  mem_fd_ = std::move(other.mem_fd_);
  std::tie(meta_, mem_, size_, size_mask_) =
      std::tie(other.meta_, other.mem_, other.size_, other.size_mask_);
  std::tie(other.meta_, other.mem_, other.size_, other.size_mask_) =
      std::make_tuple(nullptr, nullptr, 0, 0);
  return *this;
}
323 
324 // static
Create(size_t size)325 base::Optional<SharedRingBuffer> SharedRingBuffer::Create(size_t size) {
326   auto buf = SharedRingBuffer(CreateFlag(), size);
327   if (!buf.is_valid())
328     return base::nullopt;
329   return base::make_optional(std::move(buf));
330 }
331 
332 // static
Attach(base::ScopedFile mem_fd)333 base::Optional<SharedRingBuffer> SharedRingBuffer::Attach(
334     base::ScopedFile mem_fd) {
335   auto buf = SharedRingBuffer(AttachFlag(), std::move(mem_fd));
336   if (!buf.is_valid())
337     return base::nullopt;
338   return base::make_optional(std::move(buf));
339 }
340 
341 }  // namespace profiling
342 }  // namespace perfetto
343