// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::io;
use std::io::Read;
use std::io::Write;

use libc::c_int;
use libc::c_uint;
use libc::c_void;
use remain::sorted;
use win_util::create_file_mapping;
use win_util::duplicate_handle;
use winapi::um::winnt::PAGE_READWRITE;

pub use super::mmap_platform::MemoryMappingArena;
use crate::AsRawDescriptor;
use crate::Descriptor;
use crate::FromRawDescriptor;
use crate::MappedRegion;
use crate::MemoryMapping as CrateMemoryMapping;
use crate::MemoryMappingBuilder;
use crate::Protection;
use crate::RawDescriptor;
use crate::SafeDescriptor;

#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("`add_fd_mapping` is unsupported")]
    AddFdMappingIsUnsupported,
    #[error("requested memory out of range")]
    InvalidAddress,
    #[error("invalid argument provided when creating mapping")]
    InvalidArgument,
    #[error("requested offset is out of range of off_t")]
    InvalidOffset,
    #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
    InvalidRange(usize, usize, usize),
    #[error("requested memory is not page aligned")]
    NotPageAligned,
    #[error("failed to read from file to memory: {0}")]
    ReadToMemory(#[source] io::Error),
    #[error("`remove_mapping` is unsupported")]
    RemoveMappingIsUnsupported,
    #[error("system call failed while creating the mapping: {0}")]
    StdSyscallFailed(io::Error),
    #[error("mmap related system call failed: {0}")]
    SystemCallFailed(#[source] super::Error),
    #[error("failed to write from memory to file: {0}")]
    WriteFromMemory(#[source] io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

impl From<c_uint> for Protection {
    fn from(f: c_uint) -> Self {
        Protection::from(f as c_int)
    }
}

impl From<Protection> for c_uint {
    fn from(p: Protection) -> c_uint {
        let i: c_int = p.into();
        i as c_uint
    }
}

/// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
/// `mmap_size` bytes. Also checks for any overflow.
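///
/// A minimal illustrative sketch (hypothetical values, not from the original
/// docs):
///
/// ```ignore
/// // A 512-byte range starting at offset 1024 fits within a 4096-byte mapping.
/// assert!(validate_includes_range(4096, 1024, 512).is_ok());
/// // A range whose end runs past the mapping is rejected.
/// assert!(validate_includes_range(4096, 4000, 512).is_err());
/// ```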
fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
    // Ensure offset + size doesn't overflow.
    let end_offset = offset
        .checked_add(range_size)
        .ok_or(Error::InvalidAddress)?;
    // Ensure offset + size is within the mapping bounds.
    if end_offset <= mmap_size {
        Ok(())
    } else {
        Err(Error::InvalidAddress)
    }
}

impl dyn MappedRegion {
    /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start
    /// of the region (implemented with `FlushViewOfFile` on Windows). `offset`..`offset+size`
    /// must be contained within the `MappedRegion`.
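    ///
    /// A minimal usage sketch (illustrative only; assumes `mapping` is an
    /// existing `MemoryMapping` of at least 4096 bytes):
    ///
    /// ```ignore
    /// let region: &dyn MappedRegion = &mapping;
    /// // Flush the first page of the view back to its backing file.
    /// region.msync(0, 4096)?;
    /// ```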
    pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
        validate_includes_range(self.size(), offset, size)?;

        // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
        // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
        // this `MappedRegion`.
        let ret = unsafe {
            use winapi::um::memoryapi::FlushViewOfFile;
            if FlushViewOfFile((self.as_ptr() as usize + offset) as *mut libc::c_void, size) == 0 {
                -1
            } else {
                0
            }
        };
        if ret != -1 {
            Ok(())
        } else {
            Err(Error::SystemCallFailed(super::Error::last()))
        }
    }
}

/// Wraps an anonymous shared memory mapping in the current process. Provides
/// RAII semantics, unmapping the view when the mapping is dropped.
#[derive(Debug)]
pub struct MemoryMapping {
    pub(crate) addr: *mut c_void,
    pub(crate) size: usize,
}

// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
unsafe impl Sync for MemoryMapping {}

impl MemoryMapping {
    /// Creates an anonymous shared, read/write mapping of `size` bytes.
    ///
    /// # Arguments
    /// * `size` - Size of memory region in bytes.
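    ///
    /// A brief usage sketch (illustrative only):
    ///
    /// ```ignore
    /// // Map one anonymous, read/write page.
    /// let mapping = MemoryMapping::new(4096)?;
    /// assert_eq!(mapping.size(), 4096);
    /// ```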
    pub fn new(size: usize) -> Result<MemoryMapping> {
        MemoryMapping::new_protection(size, Protection::read_write())
    }

    /// Maps the first `size` bytes of the given `file_handle` as read/write.
    ///
    /// # Arguments
    /// * `file_handle` - File handle to map from.
    /// * `size` - Size of memory region in bytes.
    pub fn from_descriptor(
        file_handle: &dyn AsRawDescriptor,
        size: usize,
    ) -> Result<MemoryMapping> {
        MemoryMapping::from_descriptor_offset(file_handle, size, 0)
    }

    pub fn from_raw_descriptor(file_handle: RawDescriptor, size: usize) -> Result<MemoryMapping> {
        MemoryMapping::from_descriptor_offset(&Descriptor(file_handle), size, 0)
    }

    pub fn from_descriptor_offset(
        file_handle: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
    ) -> Result<MemoryMapping> {
        MemoryMapping::from_descriptor_offset_protection(
            file_handle,
            size,
            offset,
            Protection::read_write(),
        )
    }

    // Check that offset+count is valid and return the sum.
    pub(crate) fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
        let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
        if mem_end > self.size() {
            return Err(Error::InvalidAddress);
        }
        Ok(mem_end)
    }
}

unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.addr as *mut u8
    }

    fn size(&self) -> usize {
        self.size
    }
}

impl CrateMemoryMapping {
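    /// Reads `count` bytes from `src` into the mapping, starting at `mem_offset`.
    ///
    /// A minimal sketch (illustrative only, using an in-memory reader; assumes
    /// `mapping` is a `crate::MemoryMapping` of at least 16 bytes):
    ///
    /// ```ignore
    /// let mut src = std::io::Cursor::new(vec![0xABu8; 16]);
    /// // Copy 16 bytes from the reader into the start of the mapping.
    /// mapping.read_to_memory(0, &mut src, 16)?;
    /// ```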
    pub fn read_to_memory<F: Read>(
        &self,
        mem_offset: usize,
        src: &mut F,
        count: usize,
    ) -> Result<()> {
        self.mapping.read_to_memory(mem_offset, src, count)
    }

    pub fn write_from_memory<F: Write>(
        &self,
        mem_offset: usize,
        dst: &mut F,
        count: usize,
    ) -> Result<()> {
        self.mapping.write_from_memory(mem_offset, dst, count)
    }

    pub fn from_raw_ptr(addr: RawDescriptor, size: usize) -> Result<CrateMemoryMapping> {
        MemoryMapping::from_raw_ptr(addr, size).map(|mapping| CrateMemoryMapping {
            mapping,
            _file_descriptor: None,
        })
    }
}

pub trait MemoryMappingBuilderWindows<'a> {
    /// Builds the memory mapping from the specified descriptor, which must refer
    /// to mapped memory.
    ///
    /// Default: create a new memory mapping.
    ///
    /// `descriptor` MUST be a mapping handle. Files MUST use
    /// `MemoryMappingBuilder::from_file` instead.
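    ///
    /// A minimal sketch (illustrative only; assumes `shm` is a `SharedMemory`
    /// whose raw descriptor is a mapping handle):
    ///
    /// ```ignore
    /// let mapping = MemoryMappingBuilder::new(1024)
    ///     .from_descriptor(&shm)
    ///     .build()?;
    /// ```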
    #[allow(clippy::wrong_self_convention)]
    fn from_descriptor(self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder;
}

impl<'a> MemoryMappingBuilderWindows<'a> for MemoryMappingBuilder<'a> {
    /// See `MemoryMappingBuilderWindows`.
    fn from_descriptor(mut self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder {
        self.descriptor = Some(descriptor);
        self
    }
}

impl<'a> MemoryMappingBuilder<'a> {
    /// Build a MemoryMapping from the provided options.
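    ///
    /// A brief sketch of the anonymous-mapping path (illustrative only; assumes
    /// the builder's usual `new(size)` constructor):
    ///
    /// ```ignore
    /// // With no descriptor supplied, `build` creates a new anonymous mapping.
    /// let mapping = MemoryMappingBuilder::new(8192).build()?;
    /// assert_eq!(mapping.size(), 8192);
    /// ```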
    pub fn build(self) -> Result<CrateMemoryMapping> {
        match self.descriptor {
            Some(descriptor) => {
                let mapping_descriptor = if self.is_file_descriptor {
                    // On Windows, a file cannot be mmapped directly. We have to create a mapping
                    // handle for it first. That handle is then provided to Self::wrap, which
                    // performs the actual mmap (creating a mapped view).
                    //
                    // Safe because self.descriptor is guaranteed to be a valid handle.
                    let mapping_handle = unsafe {
                        create_file_mapping(
                            Some(descriptor.as_raw_descriptor()),
                            self.size as u64,
                            PAGE_READWRITE,
                            None,
                        )
                    }
                    .map_err(Error::StdSyscallFailed)?;

                    // The above comment block is why the SafeDescriptor wrap is safe.
                    Some(unsafe { SafeDescriptor::from_raw_descriptor(mapping_handle) })
                } else {
                    None
                };

                MemoryMappingBuilder::wrap(
                    MemoryMapping::from_descriptor_offset_protection(
                        match mapping_descriptor.as_ref() {
                            Some(descriptor) => descriptor as &dyn AsRawDescriptor,
                            None => descriptor,
                        },
                        self.size,
                        self.offset.unwrap_or(0),
                        self.protection.unwrap_or_else(Protection::read_write),
                    )?,
                    if self.is_file_descriptor {
                        self.descriptor
                    } else {
                        None
                    },
                )
            }
            None => MemoryMappingBuilder::wrap(
                MemoryMapping::new_protection(
                    self.size,
                    self.protection.unwrap_or_else(Protection::read_write),
                )?,
                None,
            ),
        }
    }

    pub fn wrap(
        mapping: MemoryMapping,
        file_descriptor: Option<&'a dyn AsRawDescriptor>,
    ) -> Result<CrateMemoryMapping> {
        let file_descriptor = match file_descriptor {
            // Safe because `duplicate_handle` will return a handle or at least error out.
            Some(descriptor) => unsafe {
                Some(SafeDescriptor::from_raw_descriptor(
                    duplicate_handle(descriptor.as_raw_descriptor())
                        .map_err(Error::StdSyscallFailed)?,
                ))
            },
            None => None,
        };

        Ok(CrateMemoryMapping {
            mapping,
            _file_descriptor: file_descriptor,
        })
    }
}

#[cfg(test)]
mod tests {
    use std::ffi::CString;

    use data_model::VolatileMemory;
    use data_model::VolatileMemoryError;

    use super::super::shm::SharedMemory;
    use super::*;

    // get_slice() and other methods are only available on crate::MemoryMapping.
    fn to_crate_mmap(mapping: MemoryMapping) -> crate::MemoryMapping {
        crate::MemoryMapping {
            mapping,
            _file_descriptor: None,
        }
    }

    #[test]
    fn basic_map() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 1024).unwrap());
        assert_eq!(1024, m.size());
    }

    #[test]
    fn test_write_past_end() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 5);
    }

    #[test]
    fn slice_size() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.size(), 3);
    }

    #[test]
    fn slice_addr() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
    }

    #[test]
    fn slice_overflow_error() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
        assert_eq!(
            res,
            VolatileMemoryError::Overflow {
                base: std::usize::MAX,
                offset: 3,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let shm = SharedMemory::new(&CString::new("test").unwrap(), 1028).unwrap();
        let m = to_crate_mmap(MemoryMapping::from_descriptor(&shm, 5).unwrap());
        let res = m.get_slice(3, 3).unwrap_err();
        assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
    }
}