// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! The mmap module provides a safe interface to mmap memory and ensures unmap is called when the
//! mmap object leaves scope.

use std::{
    cmp::min,
    io,
    mem::size_of,
    ptr::{copy_nonoverlapping, null_mut, read_unaligned, write_unaligned},
};

use crate::external_mapping::ExternalMapping;
use crate::AsRawDescriptor;
use libc::{
    c_int, c_void, read, write, {self},
};
use remain::sorted;

use data_model::{volatile_memory::*, DataInit};

use super::{pagesize, Error as ErrnoError};

#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("`add_fd_mapping` is unsupported")]
    AddFdMappingIsUnsupported,
    #[error("requested memory out of range")]
    InvalidAddress,
    #[error("invalid argument provided when creating mapping")]
    InvalidArgument,
    #[error("requested offset is out of range of off_t")]
    InvalidOffset,
    #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
    InvalidRange(usize, usize, usize),
    #[error("requested memory is not page aligned")]
    NotPageAligned,
    #[error("failed to read from file to memory: {0}")]
    ReadToMemory(#[source] io::Error),
    #[error("`remove_mapping` is unsupported")]
    RemoveMappingIsUnsupported,
    #[error("mmap related system call failed: {0}")]
    SystemCallFailed(#[source] ErrnoError),
    #[error("failed to write from memory to file: {0}")]
    WriteFromMemory(#[source] io::Error),
}
pub type Result<T> = std::result::Result<T, Error>;

/// Memory access type for anonymous shared memory mapping.
#[derive(Copy, Clone, Eq, PartialEq)]
pub struct Protection(c_int);
impl Protection {
    /// Returns Protection allowing no access.
    #[inline(always)]
    pub fn none() -> Protection {
        Protection(libc::PROT_NONE)
    }

    /// Returns Protection allowing read/write access.
    #[inline(always)]
    pub fn read_write() -> Protection {
        Protection(libc::PROT_READ | libc::PROT_WRITE)
    }

    /// Returns Protection allowing read access.
    #[inline(always)]
    pub fn read() -> Protection {
        Protection(libc::PROT_READ)
    }

    /// Returns a copy of `self` with read access set.
    #[inline(always)]
    pub fn set_read(self) -> Protection {
        Protection(self.0 | libc::PROT_READ)
    }

    /// Returns a copy of `self` with write access set.
    #[inline(always)]
    pub fn set_write(self) -> Protection {
        Protection(self.0 | libc::PROT_WRITE)
    }
}

impl From<c_int> for Protection {
    fn from(f: c_int) -> Self {
        Protection(f)
    }
}

impl From<Protection> for c_int {
    fn from(p: Protection) -> c_int {
        p.0
    }
}
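
// Illustrative sketch (not part of the original source): `Protection` values compose with the
// builder-style setters, so read/write access can be built up from `none()`:
//
//     let prot = Protection::none().set_read().set_write();
//     assert!(prot == Protection::read_write());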

/// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
/// `mmap_size` bytes.  Also checks for any overflow.
fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
    // Ensure offset + size doesn't overflow
    let end_offset = offset
        .checked_add(range_size)
        .ok_or(Error::InvalidAddress)?;
    // Ensure offset + size are within the mapping bounds
    if end_offset <= mmap_size {
        Ok(())
    } else {
        Err(Error::InvalidAddress)
    }
}

/// A range of memory that can be msynced, for abstracting over different types of memory mappings.
///
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
pub unsafe trait MappedRegion: Send + Sync {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8;

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the region with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        _offset: usize,
        _size: usize,
        _fd: &dyn AsRawDescriptor,
        _fd_offset: u64,
        _prot: Protection,
    ) -> Result<()> {
        Err(Error::AddFdMappingIsUnsupported)
    }

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
        Err(Error::RemoveMappingIsUnsupported)
    }
}

impl dyn MappedRegion {
    /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
    /// the region.  `offset`..`offset+size` must be contained within the `MappedRegion`.
    pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
        validate_includes_range(self.size(), offset, size)?;

        // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
        // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
        // this `MappedRegion`.
        let ret = unsafe {
            libc::msync(
                (self.as_ptr() as usize + offset) as *mut libc::c_void,
                size,
                libc::MS_SYNC,
            )
        };
        if ret != -1 {
            Ok(())
        } else {
            Err(Error::SystemCallFailed(ErrnoError::last()))
        }
    }
}
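
// Illustrative sketch (not part of the original source): `msync` can be invoked through any
// `MappedRegion` trait object, e.g. to flush the first page of a mapping:
//
//     let m = MemoryMapping::new(pagesize() * 2).unwrap();
//     let region: &dyn MappedRegion = &m;
//     region.msync(0, pagesize()).unwrap();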

unsafe impl MappedRegion for ExternalMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.as_ptr()
    }

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize {
        self.size()
    }
}

/// Wraps an anonymous shared memory mapping in the current process. Provides
/// RAII semantics including munmap when no longer needed.
#[derive(Debug)]
pub struct MemoryMapping {
    addr: *mut u8,
    size: usize,
}

// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMapping {}
unsafe impl Sync for MemoryMapping {}

impl MemoryMapping {
    /// Creates an anonymous shared, read/write mapping of `size` bytes.
    ///
    /// # Arguments
    /// * `size` - Size of memory region in bytes.
    pub fn new(size: usize) -> Result<MemoryMapping> {
        MemoryMapping::new_protection(size, Protection::read_write())
    }

    /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
    ///
    /// # Arguments
    /// * `size` - Size of memory region in bytes.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
        // This is safe because we are creating an anonymous mapping in a place not already used by
        // any other area in this process.
        unsafe {
            MemoryMapping::try_mmap(
                None,
                size,
                prot.into(),
                libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
                None,
            )
        }
    }

    /// Maps the first `size` bytes of the given `fd` as read/write.
    ///
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    pub fn from_fd(fd: &dyn AsRawDescriptor, size: usize) -> Result<MemoryMapping> {
        MemoryMapping::from_fd_offset(fd, size, 0)
    }

    pub fn from_fd_offset(
        fd: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
    ) -> Result<MemoryMapping> {
        MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
    }

    /// Maps `size` bytes starting at `offset` bytes of the given `fd` with the given mmap `flags`
    /// and `prot` protections.
    ///
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `flags` - flags passed directly to mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn from_fd_offset_flags(
        fd: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
        flags: c_int,
        prot: Protection,
    ) -> Result<MemoryMapping> {
        unsafe {
            // This is safe because we are creating a file-backed mapping in a place not already
            // used by any other area in this process.
            MemoryMapping::try_mmap(None, size, prot.into(), flags, Some((fd, offset)))
        }
    }

    /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with `prot` protections.
    ///
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    pub fn from_fd_offset_protection(
        fd: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
        prot: Protection,
    ) -> Result<MemoryMapping> {
        MemoryMapping::from_fd_offset_flags(fd, size, offset, libc::MAP_SHARED, prot)
    }

    /// Maps `size` bytes starting at `offset` from the given `fd` with `prot` protections, and
    /// optionally requests that the pages are pre-populated.
    /// # Arguments
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    /// * `populate` - If true, pass MAP_POPULATE to pre-fault the mapping's pages.
    pub fn from_fd_offset_protection_populate(
        fd: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
        prot: Protection,
        populate: bool,
    ) -> Result<MemoryMapping> {
        let mut flags = libc::MAP_SHARED;
        if populate {
            flags |= libc::MAP_POPULATE;
        }
        MemoryMapping::from_fd_offset_flags(fd, size, offset, flags, prot)
    }
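
    // Illustrative sketch (not part of the original source; the file path is hypothetical): map
    // the first page of an existing file read-only, pre-faulting its pages via MAP_POPULATE:
    //
    //     let file = std::fs::File::open("/path/to/some/file").unwrap();
    //     let m = MemoryMapping::from_fd_offset_protection_populate(
    //         &file,
    //         pagesize(),
    //         0,
    //         Protection::read(),
    //         true, /* populate */
    //     )
    //     .unwrap();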

    /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
    ///
    /// # Arguments
    ///
    /// * `addr` - Memory address to mmap at.
    /// * `size` - Size of memory region in bytes.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    ///
    /// # Safety
    ///
    /// This function should not be called before the caller unmaps any mmap'd regions already
    /// present at `(addr..addr+size)`.
    pub unsafe fn new_protection_fixed(
        addr: *mut u8,
        size: usize,
        prot: Protection,
    ) -> Result<MemoryMapping> {
        MemoryMapping::try_mmap(
            Some(addr),
            size,
            prot.into(),
            libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
            None,
        )
    }

    /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with
    /// `prot` protections.
    ///
    /// # Arguments
    ///
    /// * `addr` - Memory address to mmap at.
    /// * `fd` - File descriptor to mmap from.
    /// * `size` - Size of memory region in bytes.
    /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    ///
    /// # Safety
    ///
    /// This function should not be called before the caller unmaps any mmap'd regions already
    /// present at `(addr..addr+size)`.
    pub unsafe fn from_fd_offset_protection_fixed(
        addr: *mut u8,
        fd: &dyn AsRawDescriptor,
        size: usize,
        offset: u64,
        prot: Protection,
    ) -> Result<MemoryMapping> {
        MemoryMapping::try_mmap(
            Some(addr),
            size,
            prot.into(),
            libc::MAP_SHARED | libc::MAP_NORESERVE,
            Some((fd, offset)),
        )
    }

    /// Helper wrapper around libc::mmap that does some basic validation, and calls
    /// madvise with MADV_DONTDUMP on the created mmap
    unsafe fn try_mmap(
        addr: Option<*mut u8>,
        size: usize,
        prot: c_int,
        flags: c_int,
        fd: Option<(&dyn AsRawDescriptor, u64)>,
    ) -> Result<MemoryMapping> {
        let mut flags = flags;
        // If addr is provided, set the FIXED flag, and validate addr alignment
        let addr = match addr {
            Some(addr) => {
                if (addr as usize) % pagesize() != 0 {
                    return Err(Error::NotPageAligned);
                }
                flags |= libc::MAP_FIXED;
                addr as *mut libc::c_void
            }
            None => null_mut(),
        };
        // If fd is provided, validate fd offset is within bounds
        let (fd, offset) = match fd {
            Some((fd, offset)) => {
                if offset > libc::off_t::max_value() as u64 {
                    return Err(Error::InvalidOffset);
                }
                (fd.as_raw_descriptor(), offset as libc::off_t)
            }
            None => (-1, 0),
        };
        let addr = libc::mmap(addr, size, prot, flags, fd, offset);
        if addr == libc::MAP_FAILED {
            return Err(Error::SystemCallFailed(ErrnoError::last()));
        }
        // This is safe because we call madvise with a valid address and size.
        let _ = libc::madvise(addr, size, libc::MADV_DONTDUMP);

        Ok(MemoryMapping {
            addr: addr as *mut u8,
            size,
        })
    }

    /// Madvise the kernel to use Huge Pages for this mapping.
    pub fn use_hugepages(&self) -> Result<()> {
        const SZ_2M: usize = 2 * 1024 * 1024;

        // THP uses 2M pages, so use THP only on mappings that are at least
        // 2M in size.
        if self.size() < SZ_2M {
            return Ok(());
        }

        // This is safe because we call madvise with a valid address and size, and we check the
        // return value.
        let ret = unsafe {
            libc::madvise(
                self.as_ptr() as *mut libc::c_void,
                self.size(),
                libc::MADV_HUGEPAGE,
            )
        };
        if ret == -1 {
            Err(Error::SystemCallFailed(ErrnoError::last()))
        } else {
            Ok(())
        }
    }
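
    // Illustrative sketch (not part of the original source): MADV_HUGEPAGE is a best-effort hint
    // and small mappings are skipped above, so callers can apply it unconditionally:
    //
    //     let m = MemoryMapping::new(8 * 1024 * 1024).unwrap();
    //     m.use_hugepages().unwrap();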

    /// Calls msync with MS_SYNC on the mapping.
    pub fn msync(&self) -> Result<()> {
        // This is safe since we use the exact address and length of a known
        // good memory mapping.
        let ret = unsafe {
            libc::msync(
                self.as_ptr() as *mut libc::c_void,
                self.size(),
                libc::MS_SYNC,
            )
        };
        if ret == -1 {
            return Err(Error::SystemCallFailed(ErrnoError::last()));
        }
        Ok(())
    }

    /// Writes a slice to the memory region at the specified offset.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at offset 256.
    ///
    /// ```
    /// #   use crate::platform::MemoryMapping;
    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///     let res = mem_map.write_slice(&[1,2,3,4,5], 256);
    ///     assert!(res.is_ok());
    ///     assert_eq!(res.unwrap(), 5);
    /// ```
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        match self.size.checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Reads to a slice from the memory region at the specified offset.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of size 16 at offset 256.
    ///
    /// ```
    /// #   use crate::platform::MemoryMapping;
    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///     let buf = &mut [0u8; 16];
    ///     let res = mem_map.read_slice(buf, 256);
    ///     assert!(res.is_ok());
    ///     assert_eq!(res.unwrap(), 16);
    /// ```
    pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
        match self.size.checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(
                        self.as_ptr().add(offset) as *const u8,
                        buf.as_mut_ptr(),
                        bytes_copied,
                    );
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// #   use crate::platform::MemoryMapping;
    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///     let res = mem_map.write_obj(55u64, 16);
    ///     assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: DataInit>(&self, val: T, offset: usize) -> Result<()> {
        self.range_end(offset, size_of::<T>())?;
        // This is safe because we checked the bounds above.
        unsafe {
            write_unaligned(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// #   use crate::platform::MemoryMapping;
    /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///     let res = mem_map.write_obj(55u64, 32);
    ///     assert!(res.is_ok());
    ///     let num: u64 = mem_map.read_obj(32).unwrap();
    ///     assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: DataInit>(&self, offset: usize) -> Result<T> {
        self.range_end(offset, size_of::<T>())?;
        // This is safe because by definition Copy types can have their bits set arbitrarily and
        // still be valid.
        unsafe {
            Ok(read_unaligned(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

    /// Reads data from a file descriptor and writes it to guest memory.
    ///
    /// # Arguments
    /// * `mem_offset` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use crate::platform::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    ///       mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
    ///       let rand_val: u32 = mem_map.read_obj(40).map_err(|_| ())?;
    /// #     Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory(
        &self,
        mut mem_offset: usize,
        src: &dyn AsRawDescriptor,
        mut count: usize,
    ) -> Result<()> {
        self.range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
        while count > 0 {
            // The check above ensures that no memory outside this slice will get accessed by this
            // read call.
            match unsafe {
                read(
                    src.as_raw_descriptor(),
                    self.as_ptr().add(mem_offset) as *mut c_void,
                    count,
                )
            } {
                0 => {
                    return Err(Error::ReadToMemory(io::Error::from(
                        io::ErrorKind::UnexpectedEof,
                    )))
                }
                r if r < 0 => return Err(Error::ReadToMemory(io::Error::last_os_error())),
                ret => {
                    let bytes_read = ret as usize;
                    match count.checked_sub(bytes_read) {
                        Some(count_remaining) => count = count_remaining,
                        None => break,
                    }
                    mem_offset += ret as usize;
                }
            }
        }
        Ok(())
    }

    /// Writes data from memory to a file descriptor.
    ///
    /// # Arguments
    /// * `mem_offset` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Write `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use crate::platform::MemoryMapping;
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
    ///       mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
    /// #     Ok(())
    /// # }
    /// ```
    pub fn write_from_memory(
        &self,
        mut mem_offset: usize,
        dst: &dyn AsRawDescriptor,
        mut count: usize,
    ) -> Result<()> {
        self.range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
        while count > 0 {
            // The check above ensures that no memory outside this slice will get accessed by this
            // write call.
            match unsafe {
                write(
                    dst.as_raw_descriptor(),
                    self.as_ptr().add(mem_offset) as *const c_void,
                    count,
                )
            } {
                0 => {
                    return Err(Error::WriteFromMemory(io::Error::from(
                        io::ErrorKind::WriteZero,
                    )))
                }
                ret if ret < 0 => return Err(Error::WriteFromMemory(io::Error::last_os_error())),
                ret => {
                    let bytes_written = ret as usize;
                    match count.checked_sub(bytes_written) {
                        Some(count_remaining) => count = count_remaining,
                        None => break,
                    }
                    mem_offset += ret as usize;
                }
            }
        }
        Ok(())
    }

    /// Uses madvise to tell the kernel to remove the specified range.  Subsequent reads
    /// to the pages in the range will return zero bytes.
    pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
        self.range_end(mem_offset, count)
            .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
        let ret = unsafe {
            // madvising away the region is the same as the guest changing it.
            // Next time it is read, it may return zero pages.
            libc::madvise(
                (self.addr as usize + mem_offset) as *mut _,
                count,
                libc::MADV_REMOVE,
            )
        };
        if ret < 0 {
            Err(Error::InvalidRange(mem_offset, count, self.size()))
        } else {
            Ok(())
        }
    }

    // Check that offset+count is valid and return the sum.
    fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
        let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
        if mem_end > self.size() {
            return Err(Error::InvalidAddress);
        }
        Ok(mem_end)
    }
}
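
// Illustrative sketch (not part of the original source): after `remove_range`, the discarded
// pages read back as zeros on the next access:
//
//     let m = MemoryMapping::new(pagesize()).unwrap();
//     m.write_obj(1u8, 0).unwrap();
//     m.remove_range(0, pagesize()).unwrap();
//     assert_eq!(m.read_obj::<u8>(0).unwrap(), 0);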

// Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
// be unmapped until it's Dropped.
unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.addr
    }

    fn size(&self) -> usize {
        self.size
    }
}

impl VolatileMemory for MemoryMapping {
    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        let new_addr =
            (self.as_ptr() as usize)
                .checked_add(offset)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.as_ptr() as usize,
                    offset,
                })?;

        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }
}
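
// Illustrative sketch (not part of the original source): `get_slice` hands out bounds-checked
// volatile views into the mapping, mirroring the `slice_size` test below:
//
//     let m = MemoryMapping::new(pagesize()).unwrap();
//     let s = m.get_slice(0, 16).unwrap();
//     assert_eq!(s.size(), 16);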

impl Drop for MemoryMapping {
    fn drop(&mut self) {
        // This is safe because we mmap the area at addr ourselves, and nobody
        // else is holding a reference to it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}

/// Tracks Fixed Memory Maps within an anonymous memory-mapped fixed-sized arena
/// in the current process.
pub struct MemoryMappingArena {
    addr: *mut u8,
    size: usize,
}

// Send and Sync aren't automatically inherited for the raw address pointer.
// Accessing that pointer is only done through the stateless interface which
// allows the object to be shared by multiple threads without a decrease in
// safety.
unsafe impl Send for MemoryMappingArena {}
unsafe impl Sync for MemoryMappingArena {}

impl MemoryMappingArena {
    /// Creates an mmap arena of `size` bytes.
    ///
    /// # Arguments
    /// * `size` - Size of memory region in bytes.
    pub fn new(size: usize) -> Result<MemoryMappingArena> {
        // Reserve the arena's memory using an anonymous read-only mmap.
        MemoryMapping::new_protection(size, Protection::none().set_read()).map(From::from)
    }

    /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena
    /// with `prot` protections. `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    pub fn add_anon_protection(
        &mut self,
        offset: usize,
        size: usize,
        prot: Protection,
    ) -> Result<()> {
        self.try_add(offset, size, prot, None)
    }

    /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
        self.add_anon_protection(offset, size, Protection::read_write())
    }

    /// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
    /// the start of the arena. `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    pub fn add_fd(&mut self, offset: usize, size: usize, fd: &dyn AsRawDescriptor) -> Result<()> {
        self.add_fd_offset(offset, size, fd, 0)
    }

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena. `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    pub fn add_fd_offset(
        &mut self,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
    ) -> Result<()> {
        self.add_fd_offset_protection(offset, size, fd, fd_offset, Protection::read_write())
    }

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    pub fn add_fd_offset_protection(
        &mut self,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        self.try_add(offset, size, prot, Some((fd, fd_offset)))
    }

    /// Helper method that calls appropriate MemoryMapping constructor and adds
    /// the resulting map into the arena.
    fn try_add(
        &mut self,
        offset: usize,
        size: usize,
        prot: Protection,
        fd: Option<(&dyn AsRawDescriptor, u64)>,
    ) -> Result<()> {
        // Ensure offset is page-aligned
        if offset % pagesize() != 0 {
            return Err(Error::NotPageAligned);
        }
        validate_includes_range(self.size(), offset, size)?;

        // This is safe since the range has been validated.
        let mmap = unsafe {
            match fd {
                Some((fd, fd_offset)) => MemoryMapping::from_fd_offset_protection_fixed(
                    self.addr.add(offset),
                    fd,
                    size,
                    fd_offset,
                    prot,
                )?,
                None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
            }
        };

        // This mapping will get automatically removed when we drop the whole arena.
        std::mem::forget(mmap);
        Ok(())
    }

    /// Removes `size` bytes at `offset` bytes from the start of the arena. `offset` must be page
    /// aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
        self.try_add(offset, size, Protection::read(), None)
    }
}
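
// Illustrative sketch (not part of the original source): reserve a small arena, add an anonymous
// read/write sub-mapping at a page-aligned offset, then drop part of it again:
//
//     let ps = pagesize();
//     let mut arena = MemoryMappingArena::new(ps * 4).unwrap();
//     arena.add_anon(ps, ps * 2).unwrap();
//     arena.remove(ps, ps).unwrap();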

// Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that
// won't be unmapped until it's Dropped.
unsafe impl MappedRegion for MemoryMappingArena {
    fn as_ptr(&self) -> *mut u8 {
        self.addr
    }

    fn size(&self) -> usize {
        self.size
    }

    fn add_fd_mapping(
        &mut self,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        self.add_fd_offset_protection(offset, size, fd, fd_offset, prot)
    }

    fn remove_mapping(&mut self, offset: usize, size: usize) -> Result<()> {
        self.remove(offset, size)
    }
}

impl From<MemoryMapping> for MemoryMappingArena {
    fn from(mmap: MemoryMapping) -> Self {
        let addr = mmap.as_ptr();
        let size = mmap.size();

        // Forget the original mapping because the `MemoryMappingArena` will take care of calling
        // `munmap` when it is dropped.
        std::mem::forget(mmap);
        MemoryMappingArena { addr, size }
    }
}
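
// Illustrative sketch (not part of the original source): the `From` impl above lets a plain
// anonymous mapping be promoted into an arena without remapping:
//
//     let arena: MemoryMappingArena = MemoryMapping::new(pagesize() * 4).unwrap().into();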

impl Drop for MemoryMappingArena {
    fn drop(&mut self) {
        // This is safe because we own this memory range, and nobody else is holding a reference to
        // it.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.size);
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::descriptor::Descriptor;
    use data_model::{VolatileMemory, VolatileMemoryError};
    use tempfile::tempfile;

    #[test]
    fn basic_map() {
        let m = MemoryMapping::new(1024).unwrap();
        assert_eq!(1024, m.size());
    }

    #[test]
    fn map_invalid_size() {
        let res = MemoryMapping::new(0).unwrap_err();
        if let Error::SystemCallFailed(e) = res {
            assert_eq!(e.errno(), libc::EINVAL);
        } else {
            panic!("unexpected error: {}", res);
        }
    }

    #[test]
    fn map_invalid_fd() {
        let fd = Descriptor(-1);
        let res = MemoryMapping::from_fd(&fd, 1024).unwrap_err();
        if let Error::SystemCallFailed(e) = res {
            assert_eq!(e.errno(), libc::EBADF);
        } else {
            panic!("unexpected error: {}", res);
        }
    }

    #[test]
    fn test_write_past_end() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), 5);
    }

    #[test]
    fn slice_size() {
        let m = MemoryMapping::new(5).unwrap();
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.size(), 3);
    }

    #[test]
    fn slice_addr() {
        let m = MemoryMapping::new(5).unwrap();
        let s = m.get_slice(2, 3).unwrap();
        assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
    }

    #[test]
    fn slice_store() {
        let m = MemoryMapping::new(5).unwrap();
        let r = m.get_ref(2).unwrap();
        r.store(9u16);
        assert_eq!(m.read_obj::<u16>(2).unwrap(), 9);
    }

    #[test]
    fn slice_overflow_error() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
        assert_eq!(
            res,
            VolatileMemoryError::Overflow {
                base: std::usize::MAX,
                offset: 3,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let m = MemoryMapping::new(5).unwrap();
        let res = m.get_slice(3, 3).unwrap_err();
        assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
    }

    #[test]
    fn from_fd_offset_invalid() {
        let fd = tempfile().unwrap();
        let res = MemoryMapping::from_fd_offset(&fd, 4096, (libc::off_t::max_value() as u64) + 1)
            .unwrap_err();
        match res {
            Error::InvalidOffset => {}
            e => panic!("unexpected error: {}", e),
        }
    }
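
    // A supplementary round-trip sketch (not part of the original test suite): write a value into
    // one mapping, flush it to a temporary file with `write_from_memory`, then pull it back into a
    // second mapping with `read_to_memory`. It relies only on APIs defined in this module plus
    // `tempfile()`, which is already used by `from_fd_offset_invalid` above.
    #[test]
    fn file_write_read_roundtrip() {
        use std::io::{Seek, SeekFrom};

        let src = MemoryMapping::new(pagesize()).unwrap();
        src.write_obj(0xdead_beef_u32, 0).unwrap();

        let mut file = tempfile().unwrap();
        src.write_from_memory(0, &file, size_of::<u32>()).unwrap();
        file.seek(SeekFrom::Start(0)).unwrap();

        let dst = MemoryMapping::new(pagesize()).unwrap();
        dst.read_to_memory(0, &file, size_of::<u32>()).unwrap();
        assert_eq!(dst.read_obj::<u32>(0).unwrap(), 0xdead_beef_u32);
    }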

    #[test]
    fn arena_new() {
        let m = MemoryMappingArena::new(0x40000).unwrap();
        assert_eq!(m.size(), 0x40000);
    }

    #[test]
    fn arena_add() {
        let mut m = MemoryMappingArena::new(0x40000).unwrap();
        assert!(m.add_anon(0, pagesize() * 4).is_ok());
    }

    #[test]
    fn arena_remove() {
        let mut m = MemoryMappingArena::new(0x40000).unwrap();
        assert!(m.add_anon(0, pagesize() * 4).is_ok());
        assert!(m.remove(0, pagesize()).is_ok());
        assert!(m.remove(0, pagesize() * 2).is_ok());
    }

    #[test]
    fn arena_add_alignment_error() {
        let mut m = MemoryMappingArena::new(pagesize() * 2).unwrap();
        assert!(m.add_anon(0, 0x100).is_ok());
        let res = m.add_anon(pagesize() + 1, 0x100).unwrap_err();
        match res {
            Error::NotPageAligned => {}
            e => panic!("unexpected error: {}", e),
        }
    }

    #[test]
    fn arena_add_oob_error() {
        let mut m = MemoryMappingArena::new(pagesize()).unwrap();
        let res = m.add_anon(0, pagesize() + 1).unwrap_err();
        match res {
            Error::InvalidAddress => {}
            e => panic!("unexpected error: {}", e),
        }
    }

    #[test]
    fn arena_add_overlapping() {
        let ps = pagesize();
        let mut m =
            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
        m.add_anon(ps * 4, ps * 4)
            .expect("failed to add sub-mapping");

        // Overlap in the front.
        m.add_anon(ps * 2, ps * 3)
            .expect("failed to add front overlapping sub-mapping");

        // Overlap in the back.
        m.add_anon(ps * 7, ps * 3)
            .expect("failed to add back overlapping sub-mapping");

        // Overlap the back of the first mapping, all of the middle mapping, and the front of the
        // last mapping.
        m.add_anon(ps * 3, ps * 6)
            .expect("failed to add mapping that overlaps several mappings");
    }

    #[test]
    fn arena_remove_overlapping() {
        let ps = pagesize();
        let mut m =
            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
        m.add_anon(ps * 4, ps * 4)
            .expect("failed to add sub-mapping");
        m.add_anon(ps * 2, ps * 2)
            .expect("failed to add front overlapping sub-mapping");
        m.add_anon(ps * 8, ps * 2)
            .expect("failed to add back overlapping sub-mapping");

        // Remove the back of the first mapping and the front of the second.
        m.remove(ps * 3, ps * 2)
            .expect("failed to remove front overlapping mapping");

        // Remove the back of the second mapping and the front of the third.
        m.remove(ps * 7, ps * 2)
            .expect("failed to remove back overlapping mapping");

        // Remove a mapping that completely overlaps the middle mapping.
        m.remove(ps * 5, ps * 2)
            .expect("failed to remove fully overlapping mapping");
    }

    #[test]
    fn arena_remove_unaligned() {
        let ps = pagesize();
        let mut m =
            MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");

        m.add_anon(0, ps).expect("failed to add mapping");
        m.remove(0, ps - 1)
            .expect("failed to remove unaligned mapping");
    }

    #[test]
    fn arena_msync() {
        let size = 0x40000;
        let m = MemoryMappingArena::new(size).unwrap();
        let ps = pagesize();
        <dyn MappedRegion>::msync(&m, 0, ps).unwrap();
        <dyn MappedRegion>::msync(&m, 0, size).unwrap();
        <dyn MappedRegion>::msync(&m, ps, size - ps).unwrap();
        let res = <dyn MappedRegion>::msync(&m, ps, size).unwrap_err();
        match res {
            Error::InvalidAddress => {}
            e => panic!("unexpected error: {}", e),
        }
    }
}