// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::fs::File;
use std::intrinsics::copy_nonoverlapping;
use std::mem::size_of;
use std::ptr::read_unaligned;
use std::ptr::read_volatile;
use std::ptr::write_unaligned;
use std::ptr::write_volatile;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;

use data_model::volatile_memory::*;
use libc::c_int;
use serde::Deserialize;
use serde::Serialize;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::descriptor::AsRawDescriptor;
use crate::descriptor::SafeDescriptor;
use crate::platform::MemoryMapping as PlatformMmap;
use crate::platform::MmapError as Error;
use crate::platform::PROT_READ;
use crate::platform::PROT_WRITE;
use crate::SharedMemory;

pub type Result<T> = std::result::Result<T, Error>;

/// Memory access type for anonymous shared memory mapping.
#[derive(Copy, Clone, Eq, PartialEq, Serialize, Deserialize, Debug)]
pub struct Protection(c_int);
impl Protection {
    /// Returns Protection allowing read/write access.
    #[inline(always)]
    pub fn read_write() -> Protection {
        Protection(PROT_READ | PROT_WRITE)
    }

    /// Returns Protection allowing read access.
    #[inline(always)]
    pub fn read() -> Protection {
        Protection(PROT_READ)
    }

    /// Returns Protection allowing write access.
    #[inline(always)]
    pub fn write() -> Protection {
        Protection(PROT_WRITE)
    }

    /// Returns a copy of the Protection with read access added.
    #[inline(always)]
    pub fn set_read(self) -> Protection {
        Protection(self.0 | PROT_READ)
    }

    /// Returns a copy of the Protection with write access added.
    #[inline(always)]
    pub fn set_write(self) -> Protection {
        Protection(self.0 | PROT_WRITE)
    }

    /// Returns true if all access allowed by `other` is also allowed by `self`.
    #[inline(always)]
    pub fn allows(&self, other: &Protection) -> bool {
        (self.0 & PROT_READ) >= (other.0 & PROT_READ)
            && (self.0 & PROT_WRITE) >= (other.0 & PROT_WRITE)
    }
}
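
// A minimal usage sketch: `Protection` values compose via `set_read`/`set_write`,
// and `allows` succeeds only when every right granted by the argument is also
// granted by the receiver.
//
//     let rw = Protection::read_write();
//     let ro = Protection::read();
//     assert!(rw.allows(&ro));             // read/write covers read-only
//     assert!(!ro.allows(&rw));            // read-only does not cover writes
//     assert!(ro.set_write().allows(&rw)); // adding write restores coverage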

impl From<c_int> for Protection {
    fn from(f: c_int) -> Self {
        Protection(f)
    }
}

impl From<Protection> for c_int {
    fn from(p: Protection) -> c_int {
        p.0
    }
}

/// See [MemoryMapping](crate::platform::MemoryMapping) for struct- and method-level
/// documentation.
#[derive(Debug)]
pub struct MemoryMapping {
    pub(crate) mapping: PlatformMmap,

    // File-backed mappings on Windows need to keep the underlying file open while the mapping is
    // open.
    // This will be `None` on non-Windows platforms; the leading underscore marks the field as
    // intentionally unread.
    //
    // TODO(b:230902713) There was a concern about relying on the kernel's refcounting to keep the
    // file object's locks (e.g. exclusive read/write) in place. We need to revisit/validate that
    // concern.
    pub(crate) _file_descriptor: Option<SafeDescriptor>,
}

impl MemoryMapping {
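    /// Writes `buf` to the mapping starting at `offset`, truncating the write at the
    /// end of the mapping. Returns the number of bytes copied, which may be less than
    /// `buf.len()`, or `Err` if `offset` is beyond the end of the mapping.
    ///
    /// # Examples
    /// * Write four bytes at offset 64 (a sketch patterned on the `write_obj` doc
    ///   tests below).
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    ///     let written = mem_map.write_slice(&[1, 2, 3, 4], 64).unwrap();
    ///     assert_eq!(written, 4);
    /// ```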
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        match self.mapping.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

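    /// Reads from the mapping starting at `offset` into `buf`, truncating the read at
    /// the end of the mapping. Returns the number of bytes copied, which may be less
    /// than `buf.len()`, or `Err` if `offset` is beyond the end of the mapping.
    ///
    /// # Examples
    /// * Read back bytes written at offset 64 (a sketch patterned on the doc tests
    ///   below).
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    ///     mem_map.write_slice(&[1, 2, 3, 4], 64).unwrap();
    ///     let mut buf = [0u8; 4];
    ///     let read = mem_map.read_slice(&mut buf, 64).unwrap();
    ///     assert_eq!(read, 4);
    ///     assert_eq!(buf, [1, 2, 3, 4]);
    /// ```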
    pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
        match self.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(self.as_ptr().add(offset), buf.as_mut_ptr(), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// This method is for writing to regular memory. If writing to a mapped
    /// I/O region, use [`MemoryMapping::write_obj_volatile`].
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj(55u64, 16);
    ///     assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // This is safe because we checked the bounds above.
        unsafe {
            write_unaligned(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// This method is for reading from regular memory. If reading from a
    /// mapped I/O region, use [`MemoryMapping::read_obj_volatile`].
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    ///     let res = mem_map.write_obj(55u64, 32);
    ///     assert!(res.is_ok());
    ///     let num: u64 = mem_map.read_obj(32).unwrap();
    ///     assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // This is safe because by definition FromBytes types can have their bits set
        // arbitrarily and still be valid.
        unsafe {
            Ok(read_unaligned(
                self.as_ptr().add(offset) as *const u8 as *const T,
            ))
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`MemoryMapping::write_obj`].
    ///
    /// # Examples
    /// * Write a u32 at offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj_volatile(0xf00u32, 16);
    ///     assert!(res.is_ok());
    /// ```
    pub fn write_obj_volatile<T: AsBytes>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // Make sure writes to memory have been committed before performing I/O that could
        // potentially depend on them.
        fence(Ordering::SeqCst);
        // This is safe because we checked the bounds above.
        unsafe {
            write_volatile(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`MemoryMapping::read_obj`].
    ///
    /// # Examples
    /// * Read a u32 written to offset 16.
    ///
    /// ```
    /// #   use base::MemoryMappingBuilder;
    /// #   use base::SharedMemory;
    /// #   let shm = SharedMemory::new("test", 1024).unwrap();
    /// #   let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    ///     let res = mem_map.write_obj(0xf00u32, 16);
    ///     assert!(res.is_ok());
    ///     let num: u32 = mem_map.read_obj_volatile(16).unwrap();
    ///     assert_eq!(0xf00, num);
    /// ```
    pub fn read_obj_volatile<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // This is safe because by definition FromBytes types can have their bits set
        // arbitrarily and still be valid.
        unsafe {
            Ok(read_volatile(
                self.as_ptr().add(offset) as *const u8 as *const T,
            ))
        }
    }
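
    /// Flushes any changes made to this mapping back to its backing storage,
    /// delegating to the platform-specific `msync` implementation.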
    pub fn msync(&self) -> Result<()> {
        self.mapping.msync()
    }
}

pub struct MemoryMappingBuilder<'a> {
    pub(crate) descriptor: Option<&'a dyn AsRawDescriptor>,
    pub(crate) is_file_descriptor: bool,
    pub(crate) size: usize,
    pub(crate) offset: Option<u64>,
    pub(crate) protection: Option<Protection>,
    pub(crate) populate: bool,
}

/// Builds a MemoryMapping object from the specified arguments.
impl<'a> MemoryMappingBuilder<'a> {
    /// Creates a new builder specifying the size of the memory region in bytes.
    pub fn new(size: usize) -> MemoryMappingBuilder<'a> {
        MemoryMappingBuilder {
            descriptor: None,
            size,
            is_file_descriptor: false,
            offset: None,
            protection: None,
            populate: false,
        }
    }

    /// Build the memory mapping backed by the specified `File`.
    ///
    /// Default: Create a new memory mapping.
    ///
    /// Note: this is a forward-looking interface to accommodate platforms that
    /// require special handling for file-backed mappings.
    #[allow(clippy::wrong_self_convention, unused_mut)]
    pub fn from_file(mut self, file: &'a File) -> MemoryMappingBuilder {
        // On Windows, files require special handling (next day shipping if possible).
        self.is_file_descriptor = true;

        self.descriptor = Some(file as &dyn AsRawDescriptor);
        self
    }

    /// Build the memory mapping backed by the specified `SharedMemory`.
    ///
    /// Default: Create a new memory mapping.
    pub fn from_shared_memory(mut self, shm: &'a SharedMemory) -> MemoryMappingBuilder {
        self.descriptor = Some(shm as &dyn AsRawDescriptor);
        self
    }

    /// Offset in bytes from the beginning of the mapping to start the mmap.
    ///
    /// Default: No offset
    pub fn offset(mut self, offset: u64) -> MemoryMappingBuilder<'a> {
        self.offset = Some(offset);
        self
    }

    /// Protection (e.g. readable/writable) of the memory region.
    ///
    /// Default: Read/write
    pub fn protection(mut self, protection: Protection) -> MemoryMappingBuilder<'a> {
        self.protection = Some(protection);
        self
    }

    /// Build a MemoryMapping from the provided options at a fixed address. Note this
    /// is a separate function from `build` in order to isolate unsafe behavior.
    ///
    /// # Safety
    ///
    /// This function should not be called before the caller unmaps any mmap'd regions already
    /// present at `(addr..addr+size)`. If another MemoryMapping object holds the same
    /// address space, the destructors of those objects will conflict and the space could
    /// be unmapped while still in use.
    ///
    /// WARNING: On Windows, this is not compatible with `from_file`.
    /// TODO(b:230901659): Find a better way to enforce this warning in code.
    pub unsafe fn build_fixed(self, addr: *mut u8) -> Result<MemoryMapping> {
        if self.populate {
            // Population not supported for fixed mappings.
            return Err(Error::InvalidArgument);
        }
        match self.descriptor {
            None => MemoryMappingBuilder::wrap(
                PlatformMmap::new_protection_fixed(
                    addr,
                    self.size,
                    self.protection.unwrap_or_else(Protection::read_write),
                )?,
                None,
            ),
            Some(descriptor) => MemoryMappingBuilder::wrap(
                PlatformMmap::from_descriptor_offset_protection_fixed(
                    addr,
                    descriptor,
                    self.size,
                    self.offset.unwrap_or(0),
                    self.protection.unwrap_or_else(Protection::read_write),
                )?,
                None,
            ),
        }
    }
}
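
// A minimal usage sketch of the builder: map 1024 bytes of anonymous memory as
// read-only. `build` itself lives in the platform-specific half of this module,
// as exercised by the doc tests above.
//
//     let mmap = MemoryMappingBuilder::new(1024)
//         .protection(Protection::read())
//         .build()
//         .unwrap();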

impl VolatileMemory for MemoryMapping {
    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size() {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        let new_addr =
            (self.as_ptr() as usize)
                .checked_add(offset)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.as_ptr() as usize,
                    offset,
                })?;

        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }
}
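
// A short sketch of `get_slice`: obtain a bounds-checked `VolatileSlice` view of a
// sub-range of the mapping (here 16 bytes at offset 8 of a 1024-byte mapping),
// assuming `VolatileSlice` exposes its length as `size()` per data_model.
//
//     let mmap = MemoryMappingBuilder::new(1024).build().unwrap();
//     let slice = mmap.get_slice(8, 16).unwrap();
//     assert_eq!(slice.size(), 16);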

/// A range of memory that can be msynced, for abstracting over different types of memory mappings.
///
/// # Safety
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmap'd region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
pub unsafe trait MappedRegion: Send + Sync {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8;

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the region with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        _offset: usize,
        _size: usize,
        _fd: &dyn AsRawDescriptor,
        _fd_offset: u64,
        _prot: Protection,
    ) -> Result<()> {
        Err(Error::AddFdMappingIsUnsupported)
    }

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
        Err(Error::RemoveMappingIsUnsupported)
    }
}

// Safe because it exclusively forwards calls to a safe implementation.
unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.mapping.as_ptr()
    }

    fn size(&self) -> usize {
        self.mapping.size()
    }
}

#[derive(Debug, PartialEq, Eq)]
pub struct ExternalMapping {
    pub ptr: u64,
    pub size: usize,
}

unsafe impl MappedRegion for ExternalMapping {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize {
        self.size
    }
}
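
// A sketch of `ExternalMapping`: wrapping an externally managed region so it can be
// handed around as a `MappedRegion`. Per the trait's safety contract, the creator
// must guarantee `ptr..ptr+size` stays mapped for the object's lifetime. The address
// below is purely illustrative.
//
//     let region = ExternalMapping { ptr: 0x7f00_0000_0000, size: 4096 };
//     assert_eq!(region.size(), 4096);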