1 // Copyright 2017 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! The mmap module provides a safe interface to mmap memory and ensures unmap is called when the
6 //! mmap object leaves scope.
7 
8 use std::cmp::min;
9 use std::fmt::{self, Display};
10 use std::io;
11 use std::mem::size_of;
12 use std::os::unix::io::AsRawFd;
13 use std::ptr::{copy_nonoverlapping, null_mut, read_unaligned, write_unaligned};
14 
15 use libc::{self, c_int, c_void, read, write};
16 
17 use data_model::volatile_memory::*;
18 use data_model::DataInit;
19 
20 use crate::{errno, pagesize};
21 
22 #[derive(Debug)]
23 pub enum Error {
24     /// `add_fd_mapping` is not supported.
25     AddFdMappingIsUnsupported,
26     /// Requested memory out of range.
27     InvalidAddress,
28     /// Invalid argument provided when building mmap.
29     InvalidArgument,
30     /// Requested offset is out of range of `libc::off_t`.
31     InvalidOffset,
32     /// Requested mapping is not page aligned
33     NotPageAligned,
34     /// Requested memory range spans past the end of the region.
35     InvalidRange(usize, usize, usize),
36     /// `mmap` returned the given error.
37     SystemCallFailed(errno::Error),
39     /// Reading from a file into memory failed.
39     ReadToMemory(io::Error),
40     /// `remove_mapping` is not supported
41     RemoveMappingIsUnsupported,
43     /// Writing from memory to a file failed.
43     WriteFromMemory(io::Error),
44 }
45 pub type Result<T> = std::result::Result<T, Error>;
46 
47 impl Display for Error {
48     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
49         use self::Error::*;
50 
51         match self {
52             AddFdMappingIsUnsupported => write!(f, "`add_fd_mapping` is unsupported"),
53             InvalidAddress => write!(f, "requested memory out of range"),
54             InvalidArgument => write!(f, "invalid argument provided when creating mapping"),
55             InvalidOffset => write!(f, "requested offset is out of range of off_t"),
56             NotPageAligned => write!(f, "requested memory is not page aligned"),
57             InvalidRange(offset, count, region_size) => write!(
58                 f,
59                 "requested memory range spans past the end of the region: offset={} count={} region_size={}",
60                 offset, count, region_size,
61             ),
62             SystemCallFailed(e) => write!(f, "mmap related system call failed: {}", e),
63             ReadToMemory(e) => write!(f, "failed to read from file to memory: {}", e),
64             RemoveMappingIsUnsupported => write!(f, "`remove_mapping` is unsupported"),
65             WriteFromMemory(e) => write!(f, "failed to write from memory to file: {}", e),
66         }
67     }
68 }
69 
70 /// Memory access type for anonymous shared memory mapping.
71 #[derive(Copy, Clone, Eq, PartialEq)]
72 pub struct Protection(c_int);
73 impl Protection {
74     /// Returns Protection allowing no access.
75     #[inline(always)]
76     pub fn none() -> Protection {
77         Protection(libc::PROT_NONE)
78     }
79 
80     /// Returns Protection allowing read/write access.
81     #[inline(always)]
82     pub fn read_write() -> Protection {
83         Protection(libc::PROT_READ | libc::PROT_WRITE)
84     }
85 
86     /// Returns Protection allowing read access.
87     #[inline(always)]
88     pub fn read() -> Protection {
89         Protection(libc::PROT_READ)
90     }
91 
92     /// Set read events.
93     #[inline(always)]
94     pub fn set_read(self) -> Protection {
95         Protection(self.0 | libc::PROT_READ)
96     }
97 
98     /// Set write events.
99     #[inline(always)]
100     pub fn set_write(self) -> Protection {
101         Protection(self.0 | libc::PROT_WRITE)
102     }
103 }
104 
105 impl From<c_int> for Protection {
106     fn from(f: c_int) -> Self {
107         Protection(f)
108     }
109 }
110 
111 impl From<Protection> for c_int {
112     fn from(p: Protection) -> c_int {
113         p.0
114     }
115 }
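
// Illustrative sketch (editor's addition, not part of the original file): `Protection`
// is a thin wrapper over the libc PROT_* bits, so values can be composed with the
// setters above and converted back to a raw `c_int` for mmap:
//
//     let prot = Protection::none().set_read().set_write();
//     assert_eq!(c_int::from(prot), libc::PROT_READ | libc::PROT_WRITE);
//     assert!(prot == Protection::read_write());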
116 
117 /// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
118 /// `mmap_size` bytes.  Also checks for any overflow.
119 fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
120     // Ensure offset + size doesn't overflow
121     let end_offset = offset
122         .checked_add(range_size)
123         .ok_or(Error::InvalidAddress)?;
124     // Ensure offset + size are within the mapping bounds
125     if end_offset <= mmap_size {
126         Ok(())
127     } else {
128         Err(Error::InvalidAddress)
129     }
130 }
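
// Illustrative sketch (editor's addition, not part of the original file): because the
// addition above is checked, a request that would overflow is rejected just like one
// that merely runs past the end of the mapping:
//
//     assert!(validate_includes_range(0x1000, 0x0800, 0x0800).is_ok());  // exactly at the end
//     assert!(validate_includes_range(0x1000, 0x0800, 0x0801).is_err()); // past the end
//     assert!(validate_includes_range(0x1000, std::usize::MAX, 2).is_err()); // offset + size overflows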
131 
132 /// A range of memory that can be msynced, for abstracting over different types of memory mappings.
133 ///
134 /// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
135 /// can't be unmapped during the `MappedRegion`'s lifetime.
136 pub unsafe trait MappedRegion: Send + Sync {
137     /// Returns a pointer to the beginning of the memory region. Should only be
138     /// used for passing this region to ioctls for setting guest memory.
139     fn as_ptr(&self) -> *mut u8;
140 
141     /// Returns the size of the memory region in bytes.
142     fn size(&self) -> usize;
143 
144     /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
145     /// at `offset` bytes from the start of the region with `prot` protections.
146     /// `offset` must be page aligned.
147     ///
148     /// # Arguments
149     /// * `offset` - Page aligned offset into the arena in bytes.
150     /// * `size` - Size of memory region in bytes.
151     /// * `fd` - File descriptor to mmap from.
152     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
153     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
154     fn add_fd_mapping(
155         &mut self,
156         _offset: usize,
157         _size: usize,
158         _fd: &dyn AsRawFd,
159         _fd_offset: u64,
160         _prot: Protection,
161     ) -> Result<()> {
162         Err(Error::AddFdMappingIsUnsupported)
163     }
164 
165     /// Remove `size`-byte mapping starting at `offset`.
166     fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
167         Err(Error::RemoveMappingIsUnsupported)
168     }
169 }
170 
171 impl dyn MappedRegion {
172     /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
173     /// the region.  `offset`..`offset+size` must be contained within the `MappedRegion`.
174     pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
175         validate_includes_range(self.size(), offset, size)?;
176 
177         // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
178         // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
179         // this `MappedRegion`.
180         let ret = unsafe {
181             libc::msync(
182                 (self.as_ptr() as usize + offset) as *mut libc::c_void,
183                 size,
184                 libc::MS_SYNC,
185             )
186         };
187         if ret != -1 {
188             Ok(())
189         } else {
190             Err(Error::SystemCallFailed(errno::Error::last()))
191         }
192     }
193 }
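
// Illustrative sketch (editor's addition, not part of the original file): `msync` is
// callable on any `MappedRegion` trait object, for example a plain `MemoryMapping`:
//
//     let mapping = MemoryMapping::new(pagesize() * 2).unwrap();
//     let region: &dyn MappedRegion = &mapping;
//     region.msync(0, pagesize()).unwrap();                       // sync the first page
//     assert!(region.msync(pagesize(), pagesize() * 2).is_err()); // range extends past the region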
194 
195 /// Wraps an anonymous shared memory mapping in the current process.
196 #[derive(Debug)]
197 pub struct MemoryMapping {
198     addr: *mut u8,
199     size: usize,
200 }
201 
202 // Send and Sync aren't automatically inherited for the raw address pointer.
203 // Accessing that pointer is only done through the stateless interface which
204 // allows the object to be shared by multiple threads without a decrease in
205 // safety.
206 unsafe impl Send for MemoryMapping {}
207 unsafe impl Sync for MemoryMapping {}
208 
209 impl MemoryMapping {
210     /// Creates an anonymous shared, read/write mapping of `size` bytes.
211     ///
212     /// # Arguments
213     /// * `size` - Size of memory region in bytes.
214     pub fn new(size: usize) -> Result<MemoryMapping> {
215         MemoryMapping::new_protection(size, Protection::read_write())
216     }
217 
218     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
219     ///
220     /// # Arguments
221     /// * `size` - Size of memory region in bytes.
222     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
223     pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
224         // This is safe because we are creating an anonymous mapping in a place not already used by
225         // any other area in this process.
226         unsafe {
227             MemoryMapping::try_mmap(
228                 None,
229                 size,
230                 prot.into(),
231                 libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
232                 None,
233             )
234         }
235     }
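
    // Illustrative sketch (editor's addition, not part of the original file): a restricted
    // `Protection` yields, e.g., a read-only anonymous mapping; writes through it would
    // fault, so only the size is checked here:
    //
    //     let mapping = MemoryMapping::new_protection(pagesize(), Protection::read()).unwrap();
    //     assert_eq!(mapping.size(), pagesize());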
236 
237     /// Maps the first `size` bytes of the given `fd` as read/write.
238     ///
239     /// # Arguments
240     /// * `fd` - File descriptor to mmap from.
241     /// * `size` - Size of memory region in bytes.
242     pub fn from_fd(fd: &dyn AsRawFd, size: usize) -> Result<MemoryMapping> {
243         MemoryMapping::from_fd_offset(fd, size, 0)
244     }
245 
246     pub fn from_fd_offset(fd: &dyn AsRawFd, size: usize, offset: u64) -> Result<MemoryMapping> {
247         MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
248     }
249 
250     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with the given `flags` and `prot`.
251     ///
252     /// # Arguments
253     /// * `fd` - File descriptor to mmap from.
254     /// * `size` - Size of memory region in bytes.
255     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
256     /// * `flags` - flags passed directly to mmap.
257     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
258     fn from_fd_offset_flags(
259         fd: &dyn AsRawFd,
260         size: usize,
261         offset: u64,
262         flags: c_int,
263         prot: Protection,
264     ) -> Result<MemoryMapping> {
265         unsafe {
266             // This is safe because we are creating a file-backed mapping in a place not already
267             // used by any other area in this process.
268             MemoryMapping::try_mmap(None, size, prot.into(), flags, Some((fd, offset)))
269         }
270     }
271 
272     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` as read/write.
273     ///
274     /// # Arguments
275     /// * `fd` - File descriptor to mmap from.
276     /// * `size` - Size of memory region in bytes.
277     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
278     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
279     pub fn from_fd_offset_protection(
280         fd: &dyn AsRawFd,
281         size: usize,
282         offset: u64,
283         prot: Protection,
284     ) -> Result<MemoryMapping> {
285         MemoryMapping::from_fd_offset_flags(fd, size, offset, libc::MAP_SHARED, prot)
286     }
287 
288     /// Maps `size` bytes starting at `offset` from the given `fd` with `prot` protections,
289     /// optionally requesting (via `populate`) that the pages be pre-populated with MAP_POPULATE.
290     /// # Arguments
291     /// * `fd` - File descriptor to mmap from.
292     /// * `size` - Size of memory region in bytes.
293     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
294     pub fn from_fd_offset_protection_populate(
295         fd: &dyn AsRawFd,
296         size: usize,
297         offset: u64,
298         prot: Protection,
299         populate: bool,
300     ) -> Result<MemoryMapping> {
301         let mut flags = libc::MAP_SHARED;
302         if populate {
303             flags |= libc::MAP_POPULATE;
304         }
305         MemoryMapping::from_fd_offset_flags(fd, size, offset, flags, prot)
306     }
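
    // Illustrative sketch (editor's addition, not part of the original file): mapping a file
    // with `populate = true` adds MAP_POPULATE so the pages are faulted in up front. Assumes
    // the `tempfile` crate, as used by the tests at the bottom of this file:
    //
    //     let file = tempfile::tempfile().unwrap();
    //     file.set_len(pagesize() as u64 * 4).unwrap();
    //     let mapping = MemoryMapping::from_fd_offset_protection_populate(
    //         &file,
    //         pagesize() * 4,
    //         0,
    //         Protection::read_write(),
    //         true, /* populate */
    //     )
    //     .unwrap();
    //     assert_eq!(mapping.size(), pagesize() * 4);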
307 
308     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection at the given `addr`.
309     ///
310     /// # Arguments
311     ///
312     /// * `addr` - Memory address to mmap at.
313     /// * `size` - Size of memory region in bytes.
314     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
315     ///
316     /// # Safety
317     ///
318     /// This function should not be called before the caller unmaps any mmap'd regions already
319     /// present at `(addr..addr+size)`.
320     pub unsafe fn new_protection_fixed(
321         addr: *mut u8,
322         size: usize,
323         prot: Protection,
324     ) -> Result<MemoryMapping> {
325         MemoryMapping::try_mmap(
326             Some(addr),
327             size,
328             prot.into(),
329             libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
330             None,
331         )
332     }
333 
334     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with
335     /// `prot` protections.
336     ///
337     /// # Arguments
338     ///
339     /// * `addr` - Memory address to mmap at.
340     /// * `fd` - File descriptor to mmap from.
341     /// * `size` - Size of memory region in bytes.
342     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
343     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
344     ///
345     /// # Safety
346     ///
347     /// This function should not be called before the caller unmaps any mmap'd regions already
348     /// present at `(addr..addr+size)`.
349     pub unsafe fn from_fd_offset_protection_fixed(
350         addr: *mut u8,
351         fd: &dyn AsRawFd,
352         size: usize,
353         offset: u64,
354         prot: Protection,
355     ) -> Result<MemoryMapping> {
356         MemoryMapping::try_mmap(
357             Some(addr),
358             size,
359             prot.into(),
360             libc::MAP_SHARED | libc::MAP_NORESERVE,
361             Some((fd, offset)),
362         )
363     }
364 
365     /// Helper wrapper around libc::mmap that does some basic validation, and calls
366     /// madvise with MADV_DONTDUMP on the created mmap
367     unsafe fn try_mmap(
368         addr: Option<*mut u8>,
369         size: usize,
370         prot: c_int,
371         flags: c_int,
372         fd: Option<(&dyn AsRawFd, u64)>,
373     ) -> Result<MemoryMapping> {
374         let mut flags = flags;
375         // If addr is provided, set the FIXED flag, and validate addr alignment
376         let addr = match addr {
377             Some(addr) => {
378                 if (addr as usize) % pagesize() != 0 {
379                     return Err(Error::NotPageAligned);
380                 }
381                 flags |= libc::MAP_FIXED;
382                 addr as *mut libc::c_void
383             }
384             None => null_mut(),
385         };
386         // If fd is provided, validate fd offset is within bounds
387         let (fd, offset) = match fd {
388             Some((fd, offset)) => {
389                 if offset > libc::off_t::max_value() as u64 {
390                     return Err(Error::InvalidOffset);
391                 }
392                 (fd.as_raw_fd(), offset as libc::off_t)
393             }
394             None => (-1, 0),
395         };
396         let addr = libc::mmap(addr, size, prot, flags, fd, offset);
397         if addr == libc::MAP_FAILED {
398             return Err(Error::SystemCallFailed(errno::Error::last()));
399         }
400         // This is safe because we call madvise with a valid address and size, and we check the
401         // return value. We only warn about an error because failure here is not fatal to the mmap.
402         if libc::madvise(addr, size, libc::MADV_DONTDUMP) == -1 {
403             warn!(
404                 "failed madvise(MADV_DONTDUMP) on mmap: {}",
405                 errno::Error::last()
406             );
407         }
408         Ok(MemoryMapping {
409             addr: addr as *mut u8,
410             size,
411         })
412     }
413 
414     /// Madvise the kernel to use Huge Pages for this mapping.
415     pub fn use_hugepages(&self) -> Result<()> {
416         const SZ_2M: usize = 2 * 1024 * 1024;
417 
418         // THP uses 2M pages, so use THP only on mappings that are at least
419         // 2M in size.
420         if self.size() < SZ_2M {
421             return Ok(());
422         }
423 
424         // This is safe because we call madvise with a valid address and size, and we check the
425         // return value.
426         let ret = unsafe {
427             libc::madvise(
428                 self.as_ptr() as *mut libc::c_void,
429                 self.size(),
430                 libc::MADV_HUGEPAGE,
431             )
432         };
433         if ret == -1 {
434             Err(Error::SystemCallFailed(errno::Error::last()))
435         } else {
436             Ok(())
437         }
438     }
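
    // Illustrative sketch (editor's addition, not part of the original file): `use_hugepages`
    // is only a hint; it silently succeeds for mappings below the 2 MiB threshold and
    // otherwise asks the kernel for transparent huge pages:
    //
    //     let small = MemoryMapping::new(pagesize()).unwrap();
    //     small.use_hugepages().unwrap();              // below 2 MiB: a no-op
    //
    //     let large = MemoryMapping::new(4 * 1024 * 1024).unwrap();
    //     large.use_hugepages().unwrap();              // madvise(MADV_HUGEPAGE) on the whole mapping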
439 
440     /// Calls msync with MS_SYNC on the mapping.
441     pub fn msync(&self) -> Result<()> {
442         // This is safe since we use the exact address and length of a known
443         // good memory mapping.
444         let ret = unsafe {
445             libc::msync(
446                 self.as_ptr() as *mut libc::c_void,
447                 self.size(),
448                 libc::MS_SYNC,
449             )
450         };
451         if ret == -1 {
452             return Err(Error::SystemCallFailed(errno::Error::last()));
453         }
454         Ok(())
455     }
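
    // Illustrative sketch (editor's addition, not part of the original file): for a
    // file-backed mapping, `msync` flushes dirty pages back to the underlying file.
    // Assumes the `tempfile` crate, as in the tests below:
    //
    //     let file = tempfile::tempfile().unwrap();
    //     file.set_len(pagesize() as u64).unwrap();
    //     let mapping = MemoryMapping::from_fd(&file, pagesize()).unwrap();
    //     mapping.write_obj(0xdead_beefu32, 0).unwrap();
    //     mapping.msync().unwrap();                    // flush the dirty page back to `file`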
456 
457     /// Writes a slice to the memory region at the specified offset.
458     /// Returns the number of bytes written.  The number of bytes written can
459     /// be less than the length of the slice if there isn't enough room in the
460     /// memory region.
461     ///
462     /// # Examples
463     /// * Write a slice at offset 256.
464     ///
465     /// ```
466     /// #   use sys_util::MemoryMapping;
467     /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
468     ///     let res = mem_map.write_slice(&[1,2,3,4,5], 256);
469     ///     assert!(res.is_ok());
470     ///     assert_eq!(res.unwrap(), 5);
471     /// ```
472     pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
473         match self.size.checked_sub(offset) {
474             Some(size_past_offset) => {
475                 let bytes_copied = min(size_past_offset, buf.len());
476                 // The bytes_copied equation above ensures we don't copy bytes out of range of
477                 // either buf or this slice. We also know that the buffers do not overlap because
478                 // slices can never occupy the same memory as a volatile slice.
479                 unsafe {
480                     copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
481                 }
482                 Ok(bytes_copied)
483             }
484             None => Err(Error::InvalidAddress),
485         }
486     }
487 
488     /// Reads to a slice from the memory region at the specified offset.
489     /// Returns the number of bytes read.  The number of bytes read can
490     /// be less than the length of the slice if there isn't enough room in the
491     /// memory region.
492     ///
493     /// # Examples
494     /// * Read a slice of size 16 at offset 256.
495     ///
496     /// ```
497     /// #   use sys_util::MemoryMapping;
498     /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
499     ///     let buf = &mut [0u8; 16];
500     ///     let res = mem_map.read_slice(buf, 256);
501     ///     assert!(res.is_ok());
502     ///     assert_eq!(res.unwrap(), 16);
503     /// ```
504     pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
505         match self.size.checked_sub(offset) {
506             Some(size_past_offset) => {
507                 let bytes_copied = min(size_past_offset, buf.len());
508                 // The bytes_copied equation above ensures we don't copy bytes out of range of
509                 // either buf or this slice. We also know that the buffers do not overlap because
510                 // slices can never occupy the same memory as a volatile slice.
511                 unsafe {
512                     copy_nonoverlapping(
513                         self.as_ptr().add(offset) as *const u8,
514                         buf.as_mut_ptr(),
515                         bytes_copied,
516                     );
517                 }
518                 Ok(bytes_copied)
519             }
520             None => Err(Error::InvalidAddress),
521         }
522     }
523 
524     /// Writes an object to the memory region at the specified offset.
525     /// Returns Ok(()) if the object fits, or Err if it extends past the end.
526     ///
527     /// # Examples
528     /// * Write a u64 at offset 16.
529     ///
530     /// ```
531     /// #   use sys_util::MemoryMapping;
532     /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
533     ///     let res = mem_map.write_obj(55u64, 16);
534     ///     assert!(res.is_ok());
535     /// ```
536     pub fn write_obj<T: DataInit>(&self, val: T, offset: usize) -> Result<()> {
537         self.range_end(offset, size_of::<T>())?;
538         // This is safe because we checked the bounds above.
539         unsafe {
540             write_unaligned(self.as_ptr().add(offset) as *mut T, val);
541         }
542         Ok(())
543     }
544 
545     /// Reads an object from the memory region at the given offset.
546     /// Reading from a volatile area isn't strictly safe as it could change
547     /// mid-read.  However, as long as the type T is plain old data and can
548     /// handle random initialization, everything will be OK.
549     ///
550     /// # Examples
551     /// * Read a u64 written to offset 32.
552     ///
553     /// ```
554     /// #   use sys_util::MemoryMapping;
555     /// #   let mut mem_map = MemoryMapping::new(1024).unwrap();
556     ///     let res = mem_map.write_obj(55u64, 32);
557     ///     assert!(res.is_ok());
558     ///     let num: u64 = mem_map.read_obj(32).unwrap();
559     ///     assert_eq!(55, num);
560     /// ```
561     pub fn read_obj<T: DataInit>(&self, offset: usize) -> Result<T> {
562         self.range_end(offset, size_of::<T>())?;
563         // This is safe because by definition Copy types can have their bits set arbitrarily and
564         // still be valid.
565         unsafe {
566             Ok(read_unaligned(
567                 self.as_ptr().add(offset) as *const u8 as *const T
568             ))
569         }
570     }
571 
572     /// Reads data from a file descriptor and writes it to guest memory.
573     ///
574     /// # Arguments
575     /// * `mem_offset` - Begin writing memory at this offset.
576     /// * `src` - Read from `src` to memory.
577     /// * `count` - Read `count` bytes from `src` to memory.
578     ///
579     /// # Examples
580     ///
581     /// * Read bytes from /dev/urandom
582     ///
583     /// ```
584     /// # use sys_util::MemoryMapping;
585     /// # use std::fs::File;
586     /// # use std::path::Path;
587     /// # fn test_read_random() -> Result<u32, ()> {
588     /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
589     ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
590     ///       mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
591     ///       let rand_val: u32 =  mem_map.read_obj(40).map_err(|_| ())?;
592     /// #     Ok(rand_val)
593     /// # }
594     /// ```
595     pub fn read_to_memory(
596         &self,
597         mut mem_offset: usize,
598         src: &dyn AsRawFd,
599         mut count: usize,
600     ) -> Result<()> {
601         self.range_end(mem_offset, count)
602             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
603         while count > 0 {
604             // The check above ensures that no memory outside this slice will get accessed by this
605             // read call.
606             match unsafe {
607                 read(
608                     src.as_raw_fd(),
609                     self.as_ptr().add(mem_offset) as *mut c_void,
610                     count,
611                 )
612             } {
613                 0 => {
614                     return Err(Error::ReadToMemory(io::Error::from(
615                         io::ErrorKind::UnexpectedEof,
616                     )))
617                 }
618                 r if r < 0 => return Err(Error::ReadToMemory(io::Error::last_os_error())),
619                 ret => {
620                     let bytes_read = ret as usize;
621                     match count.checked_sub(bytes_read) {
622                         Some(count_remaining) => count = count_remaining,
623                         None => break,
624                     }
625                     mem_offset += ret as usize;
626                 }
627             }
628         }
629         Ok(())
630     }
631 
632     /// Writes data from memory to a file descriptor.
633     ///
634     /// # Arguments
635     /// * `mem_offset` - Begin reading memory from this offset.
636     /// * `dst` - Write from memory to `dst`.
637     /// * `count` - Write `count` bytes from memory to `dst`.
638     ///
639     /// # Examples
640     ///
641     /// * Write 128 bytes to /dev/null
642     ///
643     /// ```
644     /// # use sys_util::MemoryMapping;
645     /// # use std::fs::File;
646     /// # use std::path::Path;
647     /// # fn test_write_null() -> Result<(), ()> {
648     /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
649     ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
650     ///       mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
651     /// #     Ok(())
652     /// # }
653     /// ```
654     pub fn write_from_memory(
655         &self,
656         mut mem_offset: usize,
657         dst: &dyn AsRawFd,
658         mut count: usize,
659     ) -> Result<()> {
660         self.range_end(mem_offset, count)
661             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
662         while count > 0 {
663             // The check above ensures that no memory outside this slice will get accessed by this
664             // write call.
665             match unsafe {
666                 write(
667                     dst.as_raw_fd(),
668                     self.as_ptr().add(mem_offset) as *const c_void,
669                     count,
670                 )
671             } {
672                 0 => {
673                     return Err(Error::WriteFromMemory(io::Error::from(
674                         io::ErrorKind::WriteZero,
675                     )))
676                 }
677                 ret if ret < 0 => return Err(Error::WriteFromMemory(io::Error::last_os_error())),
678                 ret => {
679                     let bytes_written = ret as usize;
680                     match count.checked_sub(bytes_written) {
681                         Some(count_remaining) => count = count_remaining,
682                         None => break,
683                     }
684                     mem_offset += ret as usize;
685                 }
686             }
687         }
688         Ok(())
689     }
690 
691     /// Uses madvise to tell the kernel to remove the specified range.  Subsequent reads
692     /// to the pages in the range will return zero bytes.
693     pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
694         self.range_end(mem_offset, count)
695             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
696         let ret = unsafe {
697             // madvising away the region is the same as the guest changing it.
698             // Next time it is read, it may return zero pages.
699             libc::madvise(
700                 (self.addr as usize + mem_offset) as *mut _,
701                 count,
702                 libc::MADV_REMOVE,
703             )
704         };
705         if ret < 0 {
706             Err(Error::InvalidRange(mem_offset, count, self.size()))
707         } else {
708             Ok(())
709         }
710     }
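
    // Illustrative sketch (editor's addition, not part of the original file): after
    // `remove_range`, the affected pages of this anonymous shared mapping read back as zeros:
    //
    //     let mapping = MemoryMapping::new(pagesize() * 2).unwrap();
    //     mapping.write_obj(0xaa55_aa55u32, 0).unwrap();
    //     mapping.remove_range(0, pagesize()).unwrap();
    //     assert_eq!(mapping.read_obj::<u32>(0).unwrap(), 0);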
711 
712     // Check that offset+count is valid and return the sum.
713     fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
714         let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
715         if mem_end > self.size() {
716             return Err(Error::InvalidAddress);
717         }
718         Ok(mem_end)
719     }
720 }
721 
722 // Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
723 // be unmapped until it's Dropped.
724 unsafe impl MappedRegion for MemoryMapping {
725     fn as_ptr(&self) -> *mut u8 {
726         self.addr
727     }
728 
729     fn size(&self) -> usize {
730         self.size
731     }
732 }
733 
734 impl VolatileMemory for MemoryMapping {
735     fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
736         let mem_end = calc_offset(offset, count)?;
737         if mem_end > self.size {
738             return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
739         }
740 
741         let new_addr =
742             (self.as_ptr() as usize)
743                 .checked_add(offset)
744                 .ok_or(VolatileMemoryError::Overflow {
745                     base: self.as_ptr() as usize,
746                     offset,
747                 })?;
748 
749         // Safe because we checked that offset + count was within our range and we only ever hand
750         // out volatile accessors.
751         Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
752     }
753 }
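
// Illustrative sketch (editor's addition, not part of the original file): the
// `VolatileMemory` impl hands out bounds-checked `VolatileSlice`s and `VolatileRef`s into
// the mapping, mirroring the `slice_store` test below:
//
//     let mapping = MemoryMapping::new(pagesize()).unwrap();
//     let slice = mapping.get_slice(0, 16).unwrap();
//     assert_eq!(slice.size(), 16);
//     let word = mapping.get_ref(0).unwrap();
//     word.store(0x1234u16);
//     assert_eq!(mapping.read_obj::<u16>(0).unwrap(), 0x1234);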
754 
755 impl Drop for MemoryMapping {
756     fn drop(&mut self) {
757         // This is safe because we mmap the area at addr ourselves, and nobody
758         // else is holding a reference to it.
759         unsafe {
760             libc::munmap(self.addr as *mut libc::c_void, self.size);
761         }
762     }
763 }
764 
765 /// Tracks fixed memory mappings within an anonymous, fixed-size memory-mapped arena
766 /// in the current process.
767 pub struct MemoryMappingArena {
768     addr: *mut u8,
769     size: usize,
770 }
771 
772 // Send and Sync aren't automatically inherited for the raw address pointer.
773 // Accessing that pointer is only done through the stateless interface which
774 // allows the object to be shared by multiple threads without a decrease in
775 // safety.
776 unsafe impl Send for MemoryMappingArena {}
777 unsafe impl Sync for MemoryMappingArena {}
778 
779 impl MemoryMappingArena {
780     /// Creates an mmap arena of `size` bytes.
781     ///
782     /// # Arguments
783     /// * `size` - Size of memory region in bytes.
784     pub fn new(size: usize) -> Result<MemoryMappingArena> {
785         // Reserve the arena's memory using an anonymous read-only mmap.
786         MemoryMapping::new_protection(size, Protection::none().set_read()).map(From::from)
787     }
788 
789     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
790     /// `offset` must be page aligned.
791     ///
792     /// # Arguments
793     /// * `offset` - Page aligned offset into the arena in bytes.
794     /// * `size` - Size of memory region in bytes.
796     pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
797         self.try_add(offset, size, Protection::read_write(), None)
798     }
799 
800     /// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
801     /// the start of the arena. `offset` must be page aligned.
802     ///
803     /// # Arguments
804     /// * `offset` - Page aligned offset into the arena in bytes.
805     /// * `size` - Size of memory region in bytes.
806     /// * `fd` - File descriptor to mmap from.
807     pub fn add_fd(&mut self, offset: usize, size: usize, fd: &dyn AsRawFd) -> Result<()> {
808         self.add_fd_offset(offset, size, fd, 0)
809     }
810 
811     /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
812     /// at `offset` bytes from the start of the arena. `offset` must be page aligned.
813     ///
814     /// # Arguments
815     /// * `offset` - Page aligned offset into the arena in bytes.
816     /// * `size` - Size of memory region in bytes.
817     /// * `fd` - File descriptor to mmap from.
818     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
819     pub fn add_fd_offset(
820         &mut self,
821         offset: usize,
822         size: usize,
823         fd: &dyn AsRawFd,
824         fd_offset: u64,
825     ) -> Result<()> {
826         self.add_fd_offset_protection(offset, size, fd, fd_offset, Protection::read_write())
827     }
828 
829     /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
830     /// at `offset` bytes from the start of the arena with `prot` protections.
831     /// `offset` must be page aligned.
832     ///
833     /// # Arguments
834     /// * `offset` - Page aligned offset into the arena in bytes.
835     /// * `size` - Size of memory region in bytes.
836     /// * `fd` - File descriptor to mmap from.
837     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
838     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
839     pub fn add_fd_offset_protection(
840         &mut self,
841         offset: usize,
842         size: usize,
843         fd: &dyn AsRawFd,
844         fd_offset: u64,
845         prot: Protection,
846     ) -> Result<()> {
847         self.try_add(offset, size, prot, Some((fd, fd_offset)))
848     }
849 
850     /// Helper method that calls appropriate MemoryMapping constructor and adds
851     /// the resulting map into the arena.
852     fn try_add(
853         &mut self,
854         offset: usize,
855         size: usize,
856         prot: Protection,
857         fd: Option<(&dyn AsRawFd, u64)>,
858     ) -> Result<()> {
859         // Ensure offset is page-aligned
860         if offset % pagesize() != 0 {
861             return Err(Error::NotPageAligned);
862         }
863         validate_includes_range(self.size(), offset, size)?;
864 
865         // This is safe since the range has been validated.
866         let mmap = unsafe {
867             match fd {
868                 Some((fd, fd_offset)) => MemoryMapping::from_fd_offset_protection_fixed(
869                     self.addr.add(offset),
870                     fd,
871                     size,
872                     fd_offset,
873                     prot,
874                 )?,
875                 None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
876             }
877         };
878 
879         // This mapping will get automatically removed when we drop the whole arena.
880         std::mem::forget(mmap);
881         Ok(())
882     }
883 
884     /// Removes the mapping of `size` bytes at `offset` bytes from the start of the arena by
885     /// replacing it with an anonymous, read-only mapping. `offset` must be page aligned.
886     ///
887     /// # Arguments
888     /// * `offset` - Page aligned offset into the arena in bytes.
889     /// * `size` - Size of memory region in bytes.
890     pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
891         self.try_add(offset, size, Protection::read(), None)
892     }
893 }
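
// Illustrative sketch (editor's addition, not part of the original file): typical arena
// usage reserves the address range once, then maps and unmaps sub-regions in place.
// Assumes the `tempfile` crate, as used by the tests below:
//
//     let ps = pagesize();
//     let mut arena = MemoryMappingArena::new(ps * 16).unwrap();
//     arena.add_anon(0, ps * 4).unwrap();                      // anonymous scratch pages
//
//     let file = tempfile::tempfile().unwrap();
//     file.set_len(ps as u64 * 4).unwrap();
//     arena.add_fd_offset(ps * 4, ps * 4, &file, 0).unwrap();  // file-backed window
//
//     arena.remove(0, ps * 2).unwrap();                        // back to read-only anon pages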
894 
895 // Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that
896 // won't be unmapped until it's Dropped.
897 unsafe impl MappedRegion for MemoryMappingArena {
898     fn as_ptr(&self) -> *mut u8 {
899         self.addr
900     }
901 
902     fn size(&self) -> usize {
903         self.size
904     }
905 
906     fn add_fd_mapping(
907         &mut self,
908         offset: usize,
909         size: usize,
910         fd: &dyn AsRawFd,
911         fd_offset: u64,
912         prot: Protection,
913     ) -> Result<()> {
914         self.add_fd_offset_protection(offset, size, fd, fd_offset, prot)
915     }
916 
917     fn remove_mapping(&mut self, offset: usize, size: usize) -> Result<()> {
918         self.remove(offset, size)
919     }
920 }
921 
922 impl From<MemoryMapping> for MemoryMappingArena {
923     fn from(mmap: MemoryMapping) -> Self {
924         let addr = mmap.as_ptr();
925         let size = mmap.size();
926 
927         // Forget the original mapping because the `MemoryMappingArena` will take care of calling
928         // `munmap` when it is dropped.
929         std::mem::forget(mmap);
930         MemoryMappingArena { addr, size }
931     }
932 }
933 
934 impl Drop for MemoryMappingArena {
935     fn drop(&mut self) {
936         // This is safe because we own this memory range, and nobody else is holding a reference to
937         // it.
938         unsafe {
939             libc::munmap(self.addr as *mut libc::c_void, self.size);
940         }
941     }
942 }
943 
944 #[cfg(test)]
945 mod tests {
946     use super::*;
947     use crate::Descriptor;
948     use data_model::{VolatileMemory, VolatileMemoryError};
949     use tempfile::tempfile;
950 
951     #[test]
952     fn basic_map() {
953         let m = MemoryMapping::new(1024).unwrap();
954         assert_eq!(1024, m.size());
955     }
956 
957     #[test]
958     fn map_invalid_size() {
959         let res = MemoryMapping::new(0).unwrap_err();
960         if let Error::SystemCallFailed(e) = res {
961             assert_eq!(e.errno(), libc::EINVAL);
962         } else {
963             panic!("unexpected error: {}", res);
964         }
965     }
966 
967     #[test]
968     fn map_invalid_fd() {
969         let fd = Descriptor(-1);
970         let res = MemoryMapping::from_fd(&fd, 1024).unwrap_err();
971         if let Error::SystemCallFailed(e) = res {
972             assert_eq!(e.errno(), libc::EBADF);
973         } else {
974             panic!("unexpected error: {}", res);
975         }
976     }
977 
978     #[test]
979     fn test_write_past_end() {
980         let m = MemoryMapping::new(5).unwrap();
981         let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
982         assert!(res.is_ok());
983         assert_eq!(res.unwrap(), 5);
984     }
985 
986     #[test]
987     fn slice_size() {
988         let m = MemoryMapping::new(5).unwrap();
989         let s = m.get_slice(2, 3).unwrap();
990         assert_eq!(s.size(), 3);
991     }
992 
993     #[test]
994     fn slice_addr() {
995         let m = MemoryMapping::new(5).unwrap();
996         let s = m.get_slice(2, 3).unwrap();
997         assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
998     }
999 
1000     #[test]
1001     fn slice_store() {
1002         let m = MemoryMapping::new(5).unwrap();
1003         let r = m.get_ref(2).unwrap();
1004         r.store(9u16);
1005         assert_eq!(m.read_obj::<u16>(2).unwrap(), 9);
1006     }
1007 
1008     #[test]
1009     fn slice_overflow_error() {
1010         let m = MemoryMapping::new(5).unwrap();
1011         let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
1012         assert_eq!(
1013             res,
1014             VolatileMemoryError::Overflow {
1015                 base: std::usize::MAX,
1016                 offset: 3,
1017             }
1018         );
1019     }
1020     #[test]
1021     fn slice_oob_error() {
1022         let m = MemoryMapping::new(5).unwrap();
1023         let res = m.get_slice(3, 3).unwrap_err();
1024         assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
1025     }
1026 
1027     #[test]
1028     fn from_fd_offset_invalid() {
1029         let fd = tempfile().unwrap();
1030         let res = MemoryMapping::from_fd_offset(&fd, 4096, (libc::off_t::max_value() as u64) + 1)
1031             .unwrap_err();
1032         match res {
1033             Error::InvalidOffset => {}
1034             e => panic!("unexpected error: {}", e),
1035         }
1036     }
1037 
1038     #[test]
1039     fn arena_new() {
1040         let m = MemoryMappingArena::new(0x40000).unwrap();
1041         assert_eq!(m.size(), 0x40000);
1042     }
1043 
1044     #[test]
1045     fn arena_add() {
1046         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1047         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1048     }
1049 
1050     #[test]
1051     fn arena_remove() {
1052         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1053         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1054         assert!(m.remove(0, pagesize()).is_ok());
1055         assert!(m.remove(0, pagesize() * 2).is_ok());
1056     }
1057 
1058     #[test]
1059     fn arena_add_alignment_error() {
1060         let mut m = MemoryMappingArena::new(pagesize() * 2).unwrap();
1061         assert!(m.add_anon(0, 0x100).is_ok());
1062         let res = m.add_anon(pagesize() + 1, 0x100).unwrap_err();
1063         match res {
1064             Error::NotPageAligned => {}
1065             e => panic!("unexpected error: {}", e),
1066         }
1067     }
1068 
1069     #[test]
1070     fn arena_add_oob_error() {
1071         let mut m = MemoryMappingArena::new(pagesize()).unwrap();
1072         let res = m.add_anon(0, pagesize() + 1).unwrap_err();
1073         match res {
1074             Error::InvalidAddress => {}
1075             e => panic!("unexpected error: {}", e),
1076         }
1077     }
1078 
1079     #[test]
1080     fn arena_add_overlapping() {
1081         let ps = pagesize();
1082         let mut m =
1083             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1084         m.add_anon(ps * 4, ps * 4)
1085             .expect("failed to add sub-mapping");
1086 
1087         // Overlap in the front.
1088         m.add_anon(ps * 2, ps * 3)
1089             .expect("failed to add front overlapping sub-mapping");
1090 
1091         // Overlap in the back.
1092         m.add_anon(ps * 7, ps * 3)
1093             .expect("failed to add back overlapping sub-mapping");
1094 
1095         // Overlap the back of the first mapping, all of the middle mapping, and the front of the
1096         // last mapping.
1097         m.add_anon(ps * 3, ps * 6)
1098             .expect("failed to add mapping that overlaps several mappings");
1099     }
1100 
1101     #[test]
1102     fn arena_remove_overlapping() {
1103         let ps = pagesize();
1104         let mut m =
1105             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1106         m.add_anon(ps * 4, ps * 4)
1107             .expect("failed to add sub-mapping");
1108         m.add_anon(ps * 2, ps * 2)
1109             .expect("failed to add front overlapping sub-mapping");
1110         m.add_anon(ps * 8, ps * 2)
1111             .expect("failed to add back overlapping sub-mapping");
1112 
1113         // Remove the back of the first mapping and the front of the second.
1114         m.remove(ps * 3, ps * 2)
1115             .expect("failed to remove front overlapping mapping");
1116 
1117         // Remove the back of the second mapping and the front of the third.
1118         m.remove(ps * 7, ps * 2)
1119             .expect("failed to remove back overlapping mapping");
1120 
1121         // Remove a mapping that completely overlaps the middle mapping.
1122         m.remove(ps * 5, ps * 2)
1123             .expect("failed to remove fully overlapping mapping");
1124     }
1125 
1126     #[test]
1127     fn arena_remove_unaligned() {
1128         let ps = pagesize();
1129         let mut m =
1130             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1131 
1132         m.add_anon(0, ps).expect("failed to add mapping");
1133         m.remove(0, ps - 1)
1134             .expect("failed to remove unaligned mapping");
1135     }
1136 
1137     #[test]
1138     fn arena_msync() {
1139         let size = 0x40000;
1140         let m = MemoryMappingArena::new(size).unwrap();
1141         let ps = pagesize();
1142         MappedRegion::msync(&m, 0, ps).unwrap();
1143         MappedRegion::msync(&m, 0, size).unwrap();
1144         MappedRegion::msync(&m, ps, size - ps).unwrap();
1145         let res = MappedRegion::msync(&m, ps, size).unwrap_err();
1146         match res {
1147             Error::InvalidAddress => {}
1148             e => panic!("unexpected error: {}", e),
1149         }
1150     }
1151 }
1152