//! libfmq Rust wrapper

/*
 * Copyright (C) 2024 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

use fmq_bindgen::{
    convertDesc, convertGrantor, descFlags, descGrantors, descHandleFDs, descHandleInts,
    descHandleNumFDs, descHandleNumInts, descNumGrantors, descQuantum, freeDesc,
    ndk_ScopedFileDescriptor, ErasedMessageQueue, ErasedMessageQueueDesc, GrantorDescriptor,
    MQDescriptor, MemTransaction, NativeHandle, ParcelFileDescriptor, SynchronizedReadWrite,
};
use log::error;
use zerocopy::{FromBytes, Immutable, IntoBytes};

use std::ptr::addr_of_mut;
/// A trait indicating that a type is safe to pass through shared memory.
///
/// # Safety
///
/// This requires that the type must not contain any capabilities such as file
/// descriptors or heap allocations, and that it must be permitted to access
/// all bytes of its representation (so it must not contain any padding bytes).
///
/// Because being stored in shared memory allows the type to be accessed from
/// different processes, it may also be accessed from different threads in the
/// same process. As such, `Share` is a supertrait of `Sync`.
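///
/// For example, a plain-old-data struct gets `Share` through the blanket impl
/// below (a sketch only; it assumes zerocopy's derive feature is enabled for
/// this crate, so it is not compiled as a doctest):
///
/// ```ignore
/// use zerocopy::{FromBytes, Immutable, IntoBytes};
///
/// #[derive(FromBytes, Immutable, IntoBytes)]
/// #[repr(C)]
/// struct Sample {
///     timestamp_ns: u64,
///     value: u32,
///     flags: u32,
/// }
/// // `Sample` has no padding, owns no resources, and is Send + Sync,
/// // so the blanket impl below makes it `Share`.
/// ```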
pub unsafe trait Share: Sync {}

// SAFETY: All types implementing the zerocopy `Immutable`, `IntoBytes` and `FromBytes` traits
// implement `Share`, because that implies that they don't have any interior mutability and can be
// treated as just a slice of bytes.
unsafe impl<T: Immutable + IntoBytes + FromBytes + Send + Sync> Share for T {}

/// An IPC message queue for values of type T.
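///
/// A minimal usage sketch (illustrative only; constructing a queue requires
/// the native libfmq backend, so this is not compiled as a doctest):
///
/// ```ignore
/// // Create a queue with room for 8 elements and no event flag word.
/// let mut queue = MessageQueue::<u64>::new(8, false);
///
/// // Write one element; dropping the completion commits the write.
/// let mut wc = queue.write().expect("queue has space");
/// wc.write(42).expect("completion holds one element");
/// drop(wc);
///
/// // Read it back; dropping the completion commits the read.
/// let mut rc = queue.read().expect("queue has an element");
/// assert_eq!(rc.read(), Some(42));
/// ```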
pub struct MessageQueue<T> {
    inner: ErasedMessageQueue,
    ty: core::marker::PhantomData<T>,
}

/** A write completion from the MessageQueue::write() method.

This completion mutably borrows the MessageQueue to prevent concurrent writes;
these must be forbidden because the underlying AidlMessageQueue only stores the
number of outstanding writes, not which have and have not completed, so they
must complete in order. */
#[must_use]
pub struct WriteCompletion<'a, T: Share> {
    inner: MemTransaction,
    queue: &'a mut MessageQueue<T>,
    n_elems: usize,
    n_written: usize,
}

impl<T: Share> WriteCompletion<'_, T> {
    /// Obtain a pointer to the location at which the idx'th item should be
    /// stored.
    ///
    /// The returned pointer is only valid while `self` has not been dropped and
    /// is invalidated by any call to `self.write`. The pointer should be used
    /// with `std::ptr::write` or a DMA API to initialize the underlying storage
    /// before calling `assume_written` to indicate how many elements were
    /// written.
    ///
    /// It is only permitted to access at most `contiguous_count(idx)` items
    /// via offsets from the returned address.
    ///
    /// Calling this method with a greater `idx` may return a pointer to another
    /// memory region of different size than the first.
    pub fn ptr(&self, idx: usize) -> *mut T {
        if idx >= self.n_elems {
            panic!(
                "indexing out of bounds: WriteCompletion for {} elements but idx {} accessed",
                self.n_elems, idx
            )
        }
        ptr(&self.inner, idx)
    }

    /// Return the number of contiguous elements that may be stored in the
    /// backing buffer starting at the given index.
    ///
    /// Intended for use with the `ptr` method.
    ///
    /// Returns 0 if `idx` is greater than or equal to the completion's element
    /// count.
    pub fn contiguous_count(&self, idx: usize) -> usize {
        contiguous_count(&self.inner, idx, self.n_elems)
    }

    /// Returns how many elements still must be written to this WriteCompletion
    /// before dropping it.
    pub fn required_elements(&self) -> usize {
        assert!(self.n_written <= self.n_elems);
        self.n_elems - self.n_written
    }

    /// Write one item to `self`. Fails and returns the item if `self` is full.
    pub fn write(&mut self, data: T) -> Result<(), T> {
        if self.required_elements() > 0 {
            // SAFETY: `self.ptr(self.n_written)` is known to be uninitialized.
            // The dtor of data, if any, will not run because `data` is moved
            // out of here.
            unsafe { self.ptr(self.n_written).write(data) };
            self.n_written += 1;
            Ok(())
        } else {
            Err(data)
        }
    }

    /// Promise to the `WriteCompletion` that `n_newly_written` elements have
    /// been written with unsafe code or DMA to the pointer returned by the
    /// `ptr` method.
    ///
    /// Panics if `n_newly_written` exceeds the number of elements yet required.
    ///
    /// # Safety
    /// It is UB to call this method except after calling the `ptr` method and
    /// writing the specified number of values of type T to that location.
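    ///
    /// A sketch of the intended raw-pointer flow (illustrative only; it
    /// assumes an existing `queue` and a hypothetical `next_item` producer,
    /// and is not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut wc = queue.write_many(n).expect("space for n elements");
    /// let mut written = 0;
    /// while written < n {
    ///     let chunk = wc.contiguous_count(written);
    ///     let dst = wc.ptr(written);
    ///     for i in 0..chunk {
    ///         // SAFETY: `dst` is valid for `chunk` contiguous elements.
    ///         unsafe { dst.add(i).write(next_item()) };
    ///     }
    ///     // SAFETY: `chunk` elements starting at `dst` were just initialized.
    ///     unsafe { wc.assume_written(chunk) };
    ///     written += chunk;
    /// } // dropping `wc` commits the write
    /// ```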
    pub unsafe fn assume_written(&mut self, n_newly_written: usize) {
        assert!(n_newly_written <= self.required_elements());
        self.n_written += n_newly_written;
    }
}

impl<T: Share> Drop for WriteCompletion<'_, T> {
    fn drop(&mut self) {
        if self.n_written < self.n_elems {
            error!(
                "WriteCompletion dropped without writing to all elements ({}/{} written)",
                self.n_written, self.n_elems
            );
        }
        let txn = std::mem::take(&mut self.inner);
        self.queue.commit_write(txn);
    }
}

impl<T: Share> MessageQueue<T> {
    const fn type_size() -> usize {
        std::mem::size_of::<T>()
    }

    /// Create a new MessageQueue with capacity for `elems` elements.
    pub fn new(elems: usize, event_word: bool) -> Self {
        Self {
            // SAFETY: Calling bindgen'd constructor. The only argument that
            // can't be validated by the implementation is the quantum, which
            // must equal the element size.
            inner: unsafe { ErasedMessageQueue::new1(elems, event_word, Self::type_size()) },
            ty: core::marker::PhantomData,
        }
    }

    /// Create a MessageQueue connected to another existing instance from its
    /// descriptor.
    pub fn from_desc(desc: &MQDescriptor<T, SynchronizedReadWrite>, reset_pointers: bool) -> Self {
        let mut grantors = desc
            .grantors
            .iter()
            // SAFETY: this just forwards the integers to the GrantorDescriptor
            // constructor; GrantorDescriptor is POD.
            .map(|g| unsafe { convertGrantor(g.fdIndex, g.offset, g.extent) })
            .collect::<Vec<_>>();

        // SAFETY: These pointer/length pairs come from Vecs that will outlive
        // this function call, and the call itself copies all data it needs out
        // of them.
        let cpp_desc = unsafe {
            convertDesc(
                grantors.as_mut_ptr(),
                grantors.len(),
                desc.handle.fds.as_ptr().cast(),
                desc.handle.fds.len(),
                desc.handle.ints.as_ptr(),
                desc.handle.ints.len(),
                desc.quantum,
                desc.flags,
            )
        };
        // SAFETY: Calling bindgen'd constructor which does not store cpp_desc,
        // but just passes it to the initializer of AidlMQDescriptorShim, which
        // deep-copies it.
        let inner = unsafe { ErasedMessageQueue::new(cpp_desc, reset_pointers) };
        // SAFETY: we must free the desc returned by convertDesc; the pointer
        // was just returned above so we know it is valid.
        unsafe { freeDesc(cpp_desc) };
        Self { inner, ty: core::marker::PhantomData }
    }

    /// Obtain a copy of the MessageQueue's descriptor, which may be used to
    /// access it remotely.
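    ///
    /// A sketch of how the descriptor connects a second queue instance
    /// (illustrative only; in practice the descriptor is sent to another
    /// process, e.g. over Binder, and this is not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut writer = MessageQueue::<u32>::new(16, false);
    /// let desc = writer.dupe_desc();
    /// // Typically done in the receiving process:
    /// let mut reader = MessageQueue::<u32>::from_desc(&desc, false);
    /// ```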
    pub fn dupe_desc(&self) -> MQDescriptor<T, SynchronizedReadWrite> {
        // SAFETY: dupeDesc may be called on any valid ErasedMessageQueue; it
        // simply forwards to dupeDesc on the inner AidlMessageQueue and wraps
        // in a heap allocation.
        let erased_desc: *mut ErasedMessageQueueDesc = unsafe { self.inner.dupeDesc() };
        let grantor_to_rust =
            |g: &fmq_bindgen::aidl_android_hardware_common_fmq_GrantorDescriptor| {
                GrantorDescriptor { fdIndex: g.fdIndex, offset: g.offset, extent: g.extent }
            };

        let scoped_to_parcel_fd = |fd: &ndk_ScopedFileDescriptor| {
            use std::os::fd::{BorrowedFd, FromRawFd, OwnedFd};
            // SAFETY: the fd is already open as an invariant of ndk::ScopedFileDescriptor, so
            // it will not be -1, as required by BorrowedFd.
            let borrowed = unsafe { BorrowedFd::borrow_raw(fd._base as i32) };
            ParcelFileDescriptor::new(match borrowed.try_clone_to_owned() {
                Ok(fd) => fd,
                Err(e) => {
                    error!("could not dup NativeHandle fd {}: {}", fd._base, e);
                    // SAFETY: OwnedFd requires the fd is not -1. If we failed to dup the fd,
                    // other code downstream will fail, but we can do no better than pass it on.
                    unsafe { OwnedFd::from_raw_fd(fd._base as i32) }
                }
            })
        };

        // First, we create a desc with the wrong type, because we cannot create one of our
        // desired return type from whole cloth unless T implements Default. This Default
        // requirement is superfluous (T::default() is never called), so we then transmute to
        // our desired type.
        let desc = MQDescriptor::<(), SynchronizedReadWrite>::default();
        // SAFETY: This transmute changes only the element type parameter of the MQDescriptor. The
        // layout of an MQDescriptor does not depend on T as T appears in it only in PhantomData.
        let mut desc: MQDescriptor<T, SynchronizedReadWrite> = unsafe { std::mem::transmute(desc) };
        // SAFETY: These slices are created out of the pointer and length pairs exposed by the
        // individual descFoo accessors, so we know they are valid pointer/lengths and point to
        // data that will continue to exist as long as the desc does.
        //
        // Calls to the descFoo accessors on erased_desc are sound because we know inner.dupeDesc
        // returns a valid pointer to a new heap-allocated ErasedMessageQueueDesc.
        let (grantors, fds, ints, quantum, flags) = unsafe {
            let grantors = slice_from_raw_parts_or_empty(
                descGrantors(erased_desc),
                descNumGrantors(erased_desc),
            );
            let fds = slice_from_raw_parts_or_empty(
                descHandleFDs(erased_desc),
                descHandleNumFDs(erased_desc),
            );
            let ints = slice_from_raw_parts_or_empty(
                descHandleInts(erased_desc),
                descHandleNumInts(erased_desc),
            );
            let quantum = descQuantum(erased_desc);
            let flags = descFlags(erased_desc);
            (grantors, fds, ints, quantum, flags)
        };
        let fds = fds.iter().map(scoped_to_parcel_fd).collect();
        let ints = ints.to_vec();
        desc.grantors = grantors.iter().map(grantor_to_rust).collect();
        desc.handle = NativeHandle { fds, ints };
        desc.quantum = quantum;
        desc.flags = flags;
        // SAFETY: we must free the desc returned by dupeDesc; the pointer was
        // just returned above so we know it is valid.
        unsafe { freeDesc(erased_desc) };
        desc
    }

    /// Begin a write transaction. The returned WriteCompletion can be used to
    /// write into the region allocated for the transaction.
    pub fn write(&mut self) -> Option<WriteCompletion<T>> {
        self.write_many(1)
    }

    /// Begin a write transaction for multiple items. See `MessageQueue::write`.
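    ///
    /// A sketch of writing a batch with the safe `WriteCompletion::write` path
    /// (illustrative only; assumes an existing `queue` and is not compiled as
    /// a doctest):
    ///
    /// ```ignore
    /// let items = [1u32, 2, 3, 4];
    /// if let Some(mut wc) = queue.write_many(items.len()) {
    ///     for item in items {
    ///         wc.write(item).expect("completion sized for items.len() elements");
    ///     }
    /// } // dropping `wc` commits the write
    /// ```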
    pub fn write_many(&mut self, n: usize) -> Option<WriteCompletion<T>> {
        let txn = self.begin_write(n)?;
        Some(WriteCompletion { inner: txn, queue: self, n_elems: n, n_written: 0 })
    }

    fn commit_write(&mut self, txn: MemTransaction) -> bool {
        // SAFETY: simply calls commitWrite with the txn length. The txn must
        // only use its first MemRegion.
        unsafe { self.inner.commitWrite(txn.first.length + txn.second.length) }
    }

    fn begin_write(&self, n: usize) -> Option<MemTransaction> {
        let mut txn: MemTransaction = Default::default();
        // SAFETY: we pass a raw pointer to txn, which is used only during the
        // call to beginWrite to write the txn's MemRegion fields, which are raw
        // pointers and lengths pointing into the queue. The pointer to txn is
        // not stored.
        unsafe { self.inner.beginWrite(n, addr_of_mut!(txn)) }.then_some(txn)
    }
}

/// Forms a slice from a pointer and a length.
///
/// Returns an empty slice when `data` is a null pointer and `len` is zero.
///
/// # Safety
///
/// This function has the same safety requirements as [`std::slice::from_raw_parts`],
/// but unlike that function, does not exhibit undefined behavior when `data` is a
/// null pointer and `len` is zero. In this case, it returns an empty slice.
unsafe fn slice_from_raw_parts_or_empty<'a, T>(data: *const T, len: usize) -> &'a [T] {
    if data.is_null() && len == 0 {
        &[]
    } else {
        // SAFETY: The caller must guarantee to satisfy the safety requirements
        // of the standard library function [`std::slice::from_raw_parts`].
        unsafe { std::slice::from_raw_parts(data, len) }
    }
}

#[inline(always)]
fn ptr<T: Share>(txn: &MemTransaction, idx: usize) -> *mut T {
    let (base, region_idx) = if idx < txn.first.length {
        (txn.first.address, idx)
    } else {
        (txn.second.address, idx - txn.first.length)
    };
    let byte_count: usize = region_idx.checked_mul(MessageQueue::<T>::type_size()).unwrap();
    base.wrapping_byte_offset(byte_count.try_into().unwrap()) as *mut T
}

#[inline(always)]
fn contiguous_count(txn: &MemTransaction, idx: usize, n_elems: usize) -> usize {
    if idx >= n_elems {
        return 0;
    }
    if idx < txn.first.length {
        txn.first.length - idx
    } else {
        // `idx` falls in the second region, so count the elements remaining
        // from the corresponding offset within that region.
        txn.second.length - (idx - txn.first.length)
    }
}

/** A read completion from the MessageQueue::read() method.

This completion mutably borrows the MessageQueue to prevent concurrent reads;
these must be forbidden because the underlying AidlMessageQueue only stores the
number of outstanding reads, not which have and have not completed, so they
must complete in order. */
#[must_use]
pub struct ReadCompletion<'a, T: Share> {
    inner: MemTransaction,
    queue: &'a mut MessageQueue<T>,
    n_elems: usize,
    n_read: usize,
}

impl<T: Share> ReadCompletion<'_, T> {
    /// Obtain a pointer to the location at which the idx'th item is located.
    ///
    /// The returned pointer is only valid while `self` has not been dropped and
    /// is invalidated by any call to `self.read`. The pointer should be used
    /// with `std::ptr::read` or a DMA API before calling `assume_read` to
    /// indicate how many elements were read.
    ///
    /// It is only permitted to access at most `contiguous_count(idx)` items
    /// via offsets from the returned address.
    ///
    /// Calling this method with a greater `idx` may return a pointer to another
    /// memory region of different size than the first.
    pub fn ptr(&self, idx: usize) -> *mut T {
        if idx >= self.n_elems {
            panic!(
                "indexing out of bounds: ReadCompletion for {} elements but idx {} accessed",
                self.n_elems, idx
            )
        }
        ptr(&self.inner, idx)
    }

    /// Return the number of contiguous elements located in the backing buffer
    /// starting at the given index.
    ///
    /// Intended for use with the `ptr` method.
    ///
    /// Returns 0 if `idx` is greater than or equal to the completion's element
    /// count.
    pub fn contiguous_count(&self, idx: usize) -> usize {
        contiguous_count(&self.inner, idx, self.n_elems)
    }

    /// Returns how many elements still must be read from `self` before dropping
    /// it.
    pub fn unread_elements(&self) -> usize {
        assert!(self.n_read <= self.n_elems);
        self.n_elems - self.n_read
    }

    /// Read one item from `self`. Returns `None` if `self` is empty.
    pub fn read(&mut self) -> Option<T> {
        if self.unread_elements() > 0 {
            // SAFETY: `self.ptr(self.n_read)` is known to be filled with a valid
            // instance of type `T`.
            let data = unsafe { self.ptr(self.n_read).read() };
            self.n_read += 1;
            Some(data)
        } else {
            None
        }
    }

    /// Promise to the `ReadCompletion` that `n_newly_read` elements have
    /// been read with unsafe code or DMA from the pointer returned by the
    /// `ptr` method.
    ///
    /// Panics if `n_newly_read` exceeds the number of elements still unread.
    ///
    /// Calling this method without actually reading the elements will result
    /// in them being leaked without destructors (if any) running.
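    ///
    /// A sketch of the intended raw-pointer flow (illustrative only; it
    /// assumes an existing `queue` and a hypothetical `consume` function
    /// standing in for application code, and is not compiled as a doctest):
    ///
    /// ```ignore
    /// let mut rc = queue.read_many(n).expect("n readable elements");
    /// let mut read = 0;
    /// while read < n {
    ///     let chunk = rc.contiguous_count(read);
    ///     let src = rc.ptr(read);
    ///     for i in 0..chunk {
    ///         // SAFETY: `src` points to `chunk` initialized elements.
    ///         consume(unsafe { src.add(i).read() });
    ///     }
    ///     // `chunk` elements starting at `src` have now been read out.
    ///     rc.assume_read(chunk);
    ///     read += chunk;
    /// } // dropping `rc` commits the read
    /// ```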
    pub fn assume_read(&mut self, n_newly_read: usize) {
        assert!(n_newly_read <= self.unread_elements());
        self.n_read += n_newly_read;
    }
}

impl<T: Share> Drop for ReadCompletion<'_, T> {
    fn drop(&mut self) {
        if self.n_read < self.n_elems {
            error!(
                "ReadCompletion dropped without reading all elements ({}/{} read)",
                self.n_read, self.n_elems
            );
        }
        let txn = std::mem::take(&mut self.inner);
        self.queue.commit_read(txn);
    }
}

impl<T: Share> MessageQueue<T> {
    /// Begin a read transaction. The returned `ReadCompletion` can be used to
    /// read from the region allocated for the transaction.
    pub fn read(&mut self) -> Option<ReadCompletion<T>> {
        self.read_many(1)
    }

    /// Begin a read transaction for multiple items. See `MessageQueue::read`.
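    ///
    /// A sketch of draining a batch with the safe `ReadCompletion::read` path
    /// (illustrative only; assumes an existing `queue` and a hypothetical
    /// `handle` function, and is not compiled as a doctest):
    ///
    /// ```ignore
    /// if let Some(mut rc) = queue.read_many(4) {
    ///     while let Some(item) = rc.read() {
    ///         handle(item);
    ///     }
    /// } // dropping `rc` commits the read
    /// ```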
    pub fn read_many(&mut self, n: usize) -> Option<ReadCompletion<T>> {
        let txn = self.begin_read(n)?;
        Some(ReadCompletion { inner: txn, queue: self, n_elems: n, n_read: 0 })
    }

    fn commit_read(&mut self, txn: MemTransaction) -> bool {
        // SAFETY: simply calls commitRead with the txn length. The txn must
        // only use its first MemRegion.
        unsafe { self.inner.commitRead(txn.first.length + txn.second.length) }
    }

    fn begin_read(&self, n: usize) -> Option<MemTransaction> {
        let mut txn: MemTransaction = Default::default();
        // SAFETY: we pass a raw pointer to txn, which is used only during the
        // call to beginRead to write the txn's MemRegion fields, which are raw
        // pointers and lengths pointing into the queue. The pointer to txn is
        // not stored.
        unsafe { self.inner.beginRead(n, addr_of_mut!(txn)) }.then_some(txn)
    }
}

#[cfg(test)]
mod test {
    use super::*;

    #[test]
    fn slice_from_raw_parts_or_empty_with_nonempty() {
        const SLICE: &[u8] = &[1, 2, 3, 4, 5, 6];
        // SAFETY: We are constructing a slice from the pointer and length of a valid slice.
        let from_raw_parts = unsafe {
            let ptr = SLICE.as_ptr();
            let len = SLICE.len();
            slice_from_raw_parts_or_empty(ptr, len)
        };
        assert_eq!(SLICE, from_raw_parts);
    }

    #[test]
    fn slice_from_raw_parts_or_empty_with_null_pointer_zero_length() {
        // SAFETY: Calling `slice_from_raw_parts_or_empty` with a null pointer
        // and a zero length is explicitly allowed by its safety requirements.
        // In this case, `std::slice::from_raw_parts` has undefined behavior.
        let empty_from_raw_parts = unsafe {
            let ptr: *const u8 = std::ptr::null();
            let len = 0;
            slice_from_raw_parts_or_empty(ptr, len)
        };
        assert_eq!(&[] as &[u8], empty_from_raw_parts);
    }
}