1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! The mmap module provides a safe interface to mmap memory and ensures unmap is called when the
6 //! mmap object leaves scope.
7 
8 use std::io;
9 use std::ptr::null_mut;
10 
11 use libc::c_int;
12 use libc::c_void;
13 use libc::read;
14 use libc::write;
15 use log::warn;
16 use remain::sorted;
17 
18 use super::pagesize;
19 use super::Error as ErrnoError;
20 use crate::AsRawDescriptor;
21 use crate::Descriptor;
22 use crate::MappedRegion;
23 use crate::MemoryMapping as CrateMemoryMapping;
24 use crate::MemoryMappingBuilder;
25 use crate::Protection;
26 use crate::RawDescriptor;
27 use crate::SafeDescriptor;
28 
29 #[sorted]
30 #[derive(Debug, thiserror::Error)]
31 pub enum Error {
32     #[error("`add_fd_mapping` is unsupported")]
33     AddFdMappingIsUnsupported,
34     #[error("requested memory out of range")]
35     InvalidAddress,
36     #[error("invalid argument provided when creating mapping")]
37     InvalidArgument,
38     #[error("requested offset is out of range of off64_t")]
39     InvalidOffset,
40     #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
41     InvalidRange(usize, usize, usize),
42     #[error("requested memory is not page aligned")]
43     NotPageAligned,
44     #[error("failed to read from file to memory: {0}")]
45     ReadToMemory(#[source] io::Error),
46     #[error("`remove_mapping` is unsupported")]
47     RemoveMappingIsUnsupported,
48     #[error("mmap related system call failed: {0}")]
49     SystemCallFailed(#[source] ErrnoError),
50     #[error("failed to write from memory to file: {0}")]
51     WriteFromMemory(#[source] io::Error),
52 }
53 pub type Result<T> = std::result::Result<T, Error>;
54 
55 /// Validates that `offset`..`offset+range_size` lies within the bounds of a memory mapping of
56 /// `mmap_size` bytes.  Also checks for any overflow.
57 fn validate_includes_range(mmap_size: usize, offset: usize, range_size: usize) -> Result<()> {
58     // Ensure offset + size doesn't overflow
59     let end_offset = offset
60         .checked_add(range_size)
61         .ok_or(Error::InvalidAddress)?;
62     // Ensure offset + size is within the mapping bounds
63     if end_offset <= mmap_size {
64         Ok(())
65     } else {
66         Err(Error::InvalidAddress)
67     }
68 }
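// A quick illustration of the bounds check above, with hypothetical values:
//
// ```
// assert!(validate_includes_range(0x1000, 0x800, 0x800).is_ok());   // ends exactly at the limit
// assert!(validate_includes_range(0x1000, 0x800, 0x801).is_err());  // spills past the mapping
// assert!(validate_includes_range(0x1000, usize::MAX, 1).is_err()); // offset + size overflows
// ```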
69 
70 impl dyn MappedRegion {
71     /// Calls msync with MS_SYNC on a mapping of `size` bytes starting at `offset` from the start of
72     /// the region.  `offset`..`offset+size` must be contained within the `MappedRegion`.
73     pub fn msync(&self, offset: usize, size: usize) -> Result<()> {
74         validate_includes_range(self.size(), offset, size)?;
75 
76         // Safe because the MemoryMapping/MemoryMappingArena interface ensures our pointer and size
77         // are correct, and we've validated that `offset`..`offset+size` is in the range owned by
78         // this `MappedRegion`.
79         let ret = unsafe {
80             libc::msync(
81                 (self.as_ptr() as usize + offset) as *mut libc::c_void,
82                 size,
83                 libc::MS_SYNC,
84             )
85         };
86         if ret != -1 {
87             Ok(())
88         } else {
89             Err(Error::SystemCallFailed(ErrnoError::last()))
90         }
91     }
92 }
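// A minimal sketch of flushing part of a region through the trait object, mirroring the
// `arena_msync` test at the end of this file:
//
// ```
// let arena = MemoryMappingArena::new(pagesize() * 4).unwrap();
// <dyn MappedRegion>::msync(&arena, 0, pagesize()).unwrap();
// ```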
93 
94 /// Wraps an anonymous shared memory mapping in the current process. Provides
95 /// RAII semantics including munmap when no longer needed.
96 #[derive(Debug)]
97 pub struct MemoryMapping {
98     addr: *mut u8,
99     size: usize,
100 }
101 
102 // Send and Sync aren't automatically inherited for the raw address pointer.
103 // Accessing that pointer is only done through the stateless interface which
104 // allows the object to be shared by multiple threads without a decrease in
105 // safety.
106 unsafe impl Send for MemoryMapping {}
107 unsafe impl Sync for MemoryMapping {}
108 
109 impl MemoryMapping {
110     /// Creates an anonymous shared, read/write mapping of `size` bytes.
111     ///
112     /// # Arguments
113     /// * `size` - Size of memory region in bytes.
114     pub fn new(size: usize) -> Result<MemoryMapping> {
115         MemoryMapping::new_protection(size, Protection::read_write())
116     }
117 
118     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
119     ///
120     /// # Arguments
121     /// * `size` - Size of memory region in bytes.
122     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
123     pub fn new_protection(size: usize, prot: Protection) -> Result<MemoryMapping> {
124         // This is safe because we are creating an anonymous mapping in a place not already used by
125         // any other area in this process.
126         unsafe {
127             MemoryMapping::try_mmap(
128                 None,
129                 size,
130                 prot.into(),
131                 libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
132                 None,
133             )
134         }
135     }
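    // A minimal usage sketch, assuming only the `Protection` helpers already used in this
    // file:
    //
    // ```
    // // Reserve one page of anonymous shared memory that is only readable.
    // let mapping = MemoryMapping::new_protection(pagesize(), Protection::read()).unwrap();
    // assert_eq!(mapping.size(), pagesize());
    // ```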
136 
137     /// Maps the first `size` bytes of the given `fd` as read/write.
138     ///
139     /// # Arguments
140     /// * `fd` - File descriptor to mmap from.
141     /// * `size` - Size of memory region in bytes.
142     pub fn from_fd(fd: &dyn AsRawDescriptor, size: usize) -> Result<MemoryMapping> {
143         MemoryMapping::from_fd_offset(fd, size, 0)
144     }
145 
146     pub fn from_fd_offset(
147         fd: &dyn AsRawDescriptor,
148         size: usize,
149         offset: u64,
150     ) -> Result<MemoryMapping> {
151         MemoryMapping::from_fd_offset_protection(fd, size, offset, Protection::read_write())
152     }
153 
154     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with `prot` protections.
155     ///
156     /// # Arguments
157     /// * `fd` - File descriptor to mmap from.
158     /// * `size` - Size of memory region in bytes.
159     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
160     /// * `flags` - flags passed directly to mmap.
161     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
162     fn from_fd_offset_flags(
163         fd: &dyn AsRawDescriptor,
164         size: usize,
165         offset: u64,
166         flags: c_int,
167         prot: Protection,
168     ) -> Result<MemoryMapping> {
169         unsafe {
170             // This is safe because we are creating an anonymous mapping in a place not already used
171             // by any other area in this process.
172             MemoryMapping::try_mmap(None, size, prot.into(), flags, Some((fd, offset)))
173         }
174     }
175 
176     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with `prot` protections.
177     ///
178     /// # Arguments
179     /// * `fd` - File descriptor to mmap from.
180     /// * `size` - Size of memory region in bytes.
181     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
182     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
183     pub fn from_fd_offset_protection(
184         fd: &dyn AsRawDescriptor,
185         size: usize,
186         offset: u64,
187         prot: Protection,
188     ) -> Result<MemoryMapping> {
189         MemoryMapping::from_fd_offset_flags(fd, size, offset, libc::MAP_SHARED, prot)
190     }
191 
192     /// Maps `size` bytes starting at `offset` from the given `fd` with `prot` protections and,
193     /// if `populate` is set, requests that the pages be pre-populated.
194     /// # Arguments
195     /// * `fd` - File descriptor to mmap from.
196     /// * `size` - Size of memory region in bytes.
197     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
198     pub fn from_fd_offset_protection_populate(
199         fd: &dyn AsRawDescriptor,
200         size: usize,
201         offset: u64,
202         prot: Protection,
203         populate: bool,
204     ) -> Result<MemoryMapping> {
205         let mut flags = libc::MAP_SHARED;
206         if populate {
207             flags |= libc::MAP_POPULATE;
208         }
209         MemoryMapping::from_fd_offset_flags(fd, size, offset, flags, prot)
210     }
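    // A hedged sketch of mapping a file with pre-population requested, using `tempfile` as
    // the tests below do:
    //
    // ```
    // let file = tempfile::tempfile().unwrap();
    // file.set_len(pagesize() as u64).unwrap();
    // let mapping = MemoryMapping::from_fd_offset_protection_populate(
    //     &file,
    //     pagesize(),
    //     0,
    //     Protection::read_write(),
    //     /* populate= */ true,
    // )
    // .unwrap();
    // ```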
211 
212     /// Creates an anonymous shared mapping of `size` bytes with `prot` protection.
213     ///
214     /// # Arguments
215     ///
216     /// * `addr` - Memory address to mmap at.
217     /// * `size` - Size of memory region in bytes.
218     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
219     ///
220     /// # Safety
221     ///
222     /// This function should not be called before the caller unmaps any mmap'd regions already
223     /// present at `(addr..addr+size)`.
224     pub unsafe fn new_protection_fixed(
225         addr: *mut u8,
226         size: usize,
227         prot: Protection,
228     ) -> Result<MemoryMapping> {
229         MemoryMapping::try_mmap(
230             Some(addr),
231             size,
232             prot.into(),
233             libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
234             None,
235         )
236     }
237 
238     /// Maps the `size` bytes starting at `offset` bytes of the given `fd` with
239     /// `prot` protections.
240     ///
241     /// # Arguments
242     ///
243     /// * `addr` - Memory address to mmap at.
244     /// * `fd` - File descriptor to mmap from.
245     /// * `size` - Size of memory region in bytes.
246     /// * `offset` - Offset in bytes from the beginning of `fd` to start the mmap.
247     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
248     ///
249     /// # Safety
250     ///
251     /// This function should not be called before the caller unmaps any mmap'd regions already
252     /// present at `(addr..addr+size)`.
253     pub unsafe fn from_descriptor_offset_protection_fixed(
254         addr: *mut u8,
255         fd: &dyn AsRawDescriptor,
256         size: usize,
257         offset: u64,
258         prot: Protection,
259     ) -> Result<MemoryMapping> {
260         MemoryMapping::try_mmap(
261             Some(addr),
262             size,
263             prot.into(),
264             libc::MAP_SHARED | libc::MAP_NORESERVE,
265             Some((fd, offset)),
266         )
267     }
268 
269     /// Helper wrapper around libc::mmap that does some basic validation, and calls
270     /// madvise with MADV_DONTDUMP on the created mmap
271     unsafe fn try_mmap(
272         addr: Option<*mut u8>,
273         size: usize,
274         prot: c_int,
275         flags: c_int,
276         fd: Option<(&dyn AsRawDescriptor, u64)>,
277     ) -> Result<MemoryMapping> {
278         let mut flags = flags;
279         // If addr is provided, set the FIXED flag, and validate addr alignment
280         let addr = match addr {
281             Some(addr) => {
282                 if (addr as usize) % pagesize() != 0 {
283                     return Err(Error::NotPageAligned);
284                 }
285                 flags |= libc::MAP_FIXED;
286                 addr as *mut libc::c_void
287             }
288             None => null_mut(),
289         };
290         // If fd is provided, validate fd offset is within bounds
291         let (fd, offset) = match fd {
292             Some((fd, offset)) => {
293                 if offset > libc::off64_t::max_value() as u64 {
294                     return Err(Error::InvalidOffset);
295                 }
296                 (fd.as_raw_descriptor(), offset as libc::off64_t)
297             }
298             None => (-1, 0),
299         };
300         let addr = libc::mmap64(addr, size, prot, flags, fd, offset);
301         if addr == libc::MAP_FAILED {
302             return Err(Error::SystemCallFailed(ErrnoError::last()));
303         }
304         // This is safe because we call madvise with a valid address and size.
305         let _ = libc::madvise(addr, size, libc::MADV_DONTDUMP);
306 
307         // This is safe because KSM's only userspace visible effects are timing
308         // and memory consumption; it doesn't affect rust safety semantics.
309         // KSM is also disabled by default, and this flag is only a hint.
310         let _ = libc::madvise(addr, size, libc::MADV_MERGEABLE);
311 
312         Ok(MemoryMapping {
313             addr: addr as *mut u8,
314             size,
315         })
316     }
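    // Roughly the raw call made for the anonymous path above (illustrative only):
    //
    // ```
    // libc::mmap64(
    //     std::ptr::null_mut(),
    //     size,
    //     prot,
    //     libc::MAP_ANONYMOUS | libc::MAP_SHARED | libc::MAP_NORESERVE,
    //     -1,
    //     0,
    // )
    // ```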
317 
318     /// Madvise the kernel to unmap on fork.
319     pub fn use_dontfork(&self) -> Result<()> {
320         // This is safe because we call madvise with a valid address and size, and we check the
321         // return value.
322         let ret = unsafe {
323             libc::madvise(
324                 self.as_ptr() as *mut libc::c_void,
325                 self.size(),
326                 libc::MADV_DONTFORK,
327             )
328         };
329         if ret == -1 {
330             Err(Error::SystemCallFailed(ErrnoError::last()))
331         } else {
332             Ok(())
333         }
334     }
335 
336     /// Madvise the kernel to use Huge Pages for this mapping.
337     pub fn use_hugepages(&self) -> Result<()> {
338         const SZ_2M: usize = 2 * 1024 * 1024;
339 
340         // THP uses 2M pages, so use THP only on mappings that are at least
341         // 2M in size.
342         if self.size() < SZ_2M {
343             return Ok(());
344         }
345 
346         // This is safe because we call madvise with a valid address and size, and we check the
347         // return value.
348         let ret = unsafe {
349             libc::madvise(
350                 self.as_ptr() as *mut libc::c_void,
351                 self.size(),
352                 libc::MADV_HUGEPAGE,
353             )
354         };
355         if ret == -1 {
356             Err(Error::SystemCallFailed(ErrnoError::last()))
357         } else {
358             Ok(())
359         }
360     }
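    // A short sketch of opting a large anonymous mapping into THP (assuming a 4 MiB
    // mapping):
    //
    // ```
    // let mapping = MemoryMapping::new(4 * 1024 * 1024).unwrap();
    // mapping.use_hugepages().unwrap(); // silently a no-op for mappings smaller than 2 MiB
    // ```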
361 
362     /// Calls msync with MS_SYNC on the mapping.
363     pub fn msync(&self) -> Result<()> {
364         // This is safe since we use the exact address and length of a known
365         // good memory mapping.
366         let ret = unsafe {
367             libc::msync(
368                 self.as_ptr() as *mut libc::c_void,
369                 self.size(),
370                 libc::MS_SYNC,
371             )
372         };
373         if ret == -1 {
374             return Err(Error::SystemCallFailed(ErrnoError::last()));
375         }
376         Ok(())
377     }
378 
379     /// Reads data from a file descriptor and writes it to guest memory.
380     ///
381     /// # Arguments
382     /// * `mem_offset` - Begin writing memory at this offset.
383     /// * `src` - Read from `src` to memory.
384     /// * `count` - Read `count` bytes from `src` to memory.
385     ///
386     /// # Examples
387     ///
388     /// * Read bytes from /dev/urandom
389     ///
390     /// ```
391     /// # use base::MemoryMappingBuilder;
392     /// # use std::fs::File;
393     /// # use std::path::Path;
394     /// # fn test_read_random() -> Result<u32, ()> {
395     /// #     let mut mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
396     ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
397     ///       mem_map.read_to_memory(32, &mut file, 128).map_err(|_| ())?;
398     ///       let rand_val: u32 =  mem_map.read_obj(40).map_err(|_| ())?;
399     /// #     Ok(rand_val)
400     /// # }
401     /// ```
402     pub fn read_to_memory(
403         &self,
404         mut mem_offset: usize,
405         src: &dyn AsRawDescriptor,
406         mut count: usize,
407     ) -> Result<()> {
408         self.range_end(mem_offset, count)
409             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
410         while count > 0 {
411             // The check above ensures that no memory outside this slice will get accessed by this
412             // read call.
413             match unsafe {
414                 read(
415                     src.as_raw_descriptor(),
416                     self.as_ptr().add(mem_offset) as *mut c_void,
417                     count,
418                 )
419             } {
420                 0 => {
421                     return Err(Error::ReadToMemory(io::Error::from(
422                         io::ErrorKind::UnexpectedEof,
423                     )))
424                 }
425                 r if r < 0 => return Err(Error::ReadToMemory(io::Error::last_os_error())),
426                 ret => {
427                     let bytes_read = ret as usize;
428                     match count.checked_sub(bytes_read) {
429                         Some(count_remaining) => count = count_remaining,
430                         None => break,
431                     }
432                     mem_offset += ret as usize;
433                 }
434             }
435         }
436         Ok(())
437     }
438 
439     /// Writes data from memory to a file descriptor.
440     ///
441     /// # Arguments
442     /// * `mem_offset` - Begin reading memory from this offset.
443     /// * `dst` - Write from memory to `dst`.
444     /// * `count` - Write `count` bytes from memory to `dst`.
445     ///
446     /// # Examples
447     ///
448     /// * Write 128 bytes to /dev/null
449     ///
450     /// ```
451     /// # use base::platform::MemoryMapping;
452     /// # use std::fs::File;
453     /// # use std::path::Path;
454     /// # fn test_write_null() -> Result<(), ()> {
455     /// #     let mut mem_map = MemoryMapping::new(1024).unwrap();
456     ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
457     ///       mem_map.write_from_memory(32, &mut file, 128).map_err(|_| ())?;
458     /// #     Ok(())
459     /// # }
460     /// ```
461     pub fn write_from_memory(
462         &self,
463         mut mem_offset: usize,
464         dst: &dyn AsRawDescriptor,
465         mut count: usize,
466     ) -> Result<()> {
467         self.range_end(mem_offset, count)
468             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
469         while count > 0 {
470             // The check above ensures that no memory outside this slice will get accessed by this
471             // write call.
472             match unsafe {
473                 write(
474                     dst.as_raw_descriptor(),
475                     self.as_ptr().add(mem_offset) as *const c_void,
476                     count,
477                 )
478             } {
479                 0 => {
480                     return Err(Error::WriteFromMemory(io::Error::from(
481                         io::ErrorKind::WriteZero,
482                     )))
483                 }
484                 ret if ret < 0 => return Err(Error::WriteFromMemory(io::Error::last_os_error())),
485                 ret => {
486                     let bytes_written = ret as usize;
487                     match count.checked_sub(bytes_written) {
488                         Some(count_remaining) => count = count_remaining,
489                         None => break,
490                     }
491                     mem_offset += ret as usize;
492                 }
493             }
494         }
495         Ok(())
496     }
497 
498     /// Uses madvise to tell the kernel to remove the specified range.  Subsequent reads
499     /// of the pages in the range will return zero bytes.
500     pub fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
501         self.range_end(mem_offset, count)
502             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
503         let ret = unsafe {
504             // madvising away the region is the same as the guest changing it.
505             // Next time it is read, it may return zero pages.
506             libc::madvise(
507                 (self.addr as usize + mem_offset) as *mut _,
508                 count,
509                 libc::MADV_REMOVE,
510             )
511         };
512         if ret < 0 {
513             Err(Error::SystemCallFailed(super::Error::last()))
514         } else {
515             Ok(())
516         }
517     }
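    // A hedged sketch of punching a hole in an anonymous mapping; later reads of the
    // removed pages return zeros:
    //
    // ```
    // let mapping = MemoryMapping::new(pagesize() * 2).unwrap();
    // mapping.remove_range(0, pagesize()).unwrap();
    // ```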
518 
519     /// Tell the kernel to readahead the range.
520     ///
521     /// This does not block the thread on I/O while reading the backing file, and it does not
522     /// guarantee that the pages will actually be present unless they are mlock(2)ed by
523     /// `lock_on_fault_unchecked()`.
524     ///
525     /// The `mem_offset` and `count` must be validated by the caller.
526     ///
527     /// # Arguments
528     ///
529     /// * `mem_offset` - The offset of the head of the range.
530     /// * `count` - The size in bytes of the range.
531     pub fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()> {
532         // Validation
533         self.range_end(mem_offset, count)
534             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
535         // Safe because populating the pages from the backing file does not affect Rust
536         // memory safety.
537         let ret = unsafe {
538             libc::madvise(
539                 (self.addr as usize + mem_offset) as *mut _,
540                 count,
541                 libc::MADV_WILLNEED,
542             )
543         };
544         if ret < 0 {
545             Err(Error::SystemCallFailed(super::Error::last()))
546         } else {
547             Ok(())
548         }
549     }
550 
551     /// Tell the kernel to drop the page cache.
552     ///
553     /// This cannot be applied to locked pages.
554     ///
555     /// The `mem_offset` and `count` must be validated by the caller.
556     ///
557     /// NOTE: This function has destructive semantics. It throws away data in the page cache without
558     /// writing it to the backing file. If the data is important, the caller should ensure it is
559     /// written to disk before calling this function or should use MADV_PAGEOUT instead.
560     ///
561     /// # Arguments
562     ///
563     /// * `mem_offset` - The offset of the head of the range.
564     /// * `count` - The size in bytes of the range.
565     pub fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()> {
566         // Validation
567         self.range_end(mem_offset, count)
568             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
569         // Safe because dropping the page cache does not affect the Rust memory safety.
570         let ret = unsafe {
571             libc::madvise(
572                 (self.addr as usize + mem_offset) as *mut _,
573                 count,
574                 libc::MADV_DONTNEED,
575             )
576         };
577         if ret < 0 {
578             Err(Error::SystemCallFailed(super::Error::last()))
579         } else {
580             Ok(())
581         }
582     }
583 
584     /// Lock the resident pages in the range not to be swapped out.
585     ///
586     /// The remaining nonresident pages are locked when they are populated.
587     ///
588     /// The `mem_offset` and `count` must be validated by the caller.
589     ///
590     /// # Arguments
591     ///
592     /// * `mem_offset` - The offset of the head of the range.
593     /// * `count` - The size in bytes of the range.
594     pub fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()> {
595         // Validation
596         self.range_end(mem_offset, count)
597             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
598         let addr = self.addr as usize + mem_offset;
599         // Safe because MLOCK_ONFAULT only affects the swap behavior of the kernel, so it has no
600         // impact on rust semantics.
601         // let ret = unsafe { libc::mlock2(addr as *mut _, count, libc::MLOCK_ONFAULT) };
602         // ANDROID(b/274805769): android glibc doesn't have mlock2, so we need to make the syscall directly.
603         let ret = unsafe {
604             libc::syscall(
605                 libc::SYS_mlock2,
606                 addr as *mut libc::c_void,
607                 count,
608                 libc::MLOCK_ONFAULT,
609             )
610         };
611         if ret < 0 {
612             let errno = super::Error::last();
613             warn!(
614                 "failed to mlock at {:#x} with length {}: {}",
615                 addr as u64,
616                 self.size(),
617                 errno,
618             );
619             Err(Error::SystemCallFailed(errno))
620         } else {
621             Ok(())
622         }
623     }
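    // A minimal lock/unlock round trip (illustrative; mlock2(2) can fail if RLIMIT_MEMLOCK
    // is too low):
    //
    // ```
    // let mapping = MemoryMapping::new(pagesize()).unwrap();
    // mapping.lock_on_fault(0, pagesize()).unwrap();
    // mapping.unlock(0, pagesize()).unwrap();
    // ```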
624 
625     /// Unlock the range of pages.
626     ///
627     /// Unlocking non-locked pages does not fail.
628     ///
629     /// The `mem_offset` and `count` must be validated by the caller.
630     ///
631     /// # Arguments
632     ///
633     /// * `mem_offset` - The offset of the head of the range.
634     /// * `count` - The size in bytes of the range.
635     pub fn unlock(&self, mem_offset: usize, count: usize) -> Result<()> {
636         // Validation
637         self.range_end(mem_offset, count)
638             .map_err(|_| Error::InvalidRange(mem_offset, count, self.size()))?;
639         // Safe because munlock(2) does not affect the Rust memory safety.
640         let ret = unsafe { libc::munlock((self.addr as usize + mem_offset) as *mut _, count) };
641         if ret < 0 {
642             Err(Error::SystemCallFailed(super::Error::last()))
643         } else {
644             Ok(())
645         }
646     }
647 
648     // Check that offset+count is valid and return the sum.
649     pub(crate) fn range_end(&self, offset: usize, count: usize) -> Result<usize> {
650         let mem_end = offset.checked_add(count).ok_or(Error::InvalidAddress)?;
651         if mem_end > self.size() {
652             return Err(Error::InvalidAddress);
653         }
654         Ok(mem_end)
655     }
656 }
657 
658 // Safe because the pointer and size point to a memory range owned by this MemoryMapping that won't
659 // be unmapped until it's Dropped.
660 unsafe impl MappedRegion for MemoryMapping {
661     fn as_ptr(&self) -> *mut u8 {
662         self.addr
663     }
664 
665     fn size(&self) -> usize {
666         self.size
667     }
668 }
669 
670 impl Drop for MemoryMapping {
671     fn drop(&mut self) {
672         // This is safe because we mmap the area at addr ourselves, and nobody
673         // else is holding a reference to it.
674         unsafe {
675             libc::munmap(self.addr as *mut libc::c_void, self.size);
676         }
677     }
678 }
679 
680 /// Tracks Fixed Memory Maps within an anonymous memory-mapped fixed-sized arena
681 /// in the current process.
682 pub struct MemoryMappingArena {
683     addr: *mut u8,
684     size: usize,
685 }
686 
687 // Send and Sync aren't automatically inherited for the raw address pointer.
688 // Accessing that pointer is only done through the stateless interface which
689 // allows the object to be shared by multiple threads without a decrease in
690 // safety.
691 unsafe impl Send for MemoryMappingArena {}
692 unsafe impl Sync for MemoryMappingArena {}
693 
694 impl MemoryMappingArena {
695     /// Creates an mmap arena of `size` bytes.
696     ///
697     /// # Arguments
698     /// * `size` - Size of memory region in bytes.
699     pub fn new(size: usize) -> Result<MemoryMappingArena> {
700         // Reserve the arena's memory using an anonymous read-only mmap.
701         MemoryMapping::new_protection(size, Protection::read()).map(From::from)
702     }
703 
704     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena
705     /// with `prot` protections. `offset` must be page aligned.
706     ///
707     /// # Arguments
708     /// * `offset` - Page aligned offset into the arena in bytes.
709     /// * `size` - Size of memory region in bytes.
710     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
711     pub fn add_anon_protection(
712         &mut self,
713         offset: usize,
714         size: usize,
715         prot: Protection,
716     ) -> Result<()> {
717         self.try_add(offset, size, prot, None)
718     }
719 
720     /// Anonymously maps `size` bytes at `offset` bytes from the start of the arena.
721     /// `offset` must be page aligned.
722     ///
723     /// # Arguments
724     /// * `offset` - Page aligned offset into the arena in bytes.
725     /// * `size` - Size of memory region in bytes.
726     pub fn add_anon(&mut self, offset: usize, size: usize) -> Result<()> {
727         self.add_anon_protection(offset, size, Protection::read_write())
728     }
729 
730     /// Maps `size` bytes from the start of the given `fd` at `offset` bytes from
731     /// the start of the arena. `offset` must be page aligned.
732     ///
733     /// # Arguments
734     /// * `offset` - Page aligned offset into the arena in bytes.
735     /// * `size` - Size of memory region in bytes.
736     /// * `fd` - File descriptor to mmap from.
737     pub fn add_fd(&mut self, offset: usize, size: usize, fd: &dyn AsRawDescriptor) -> Result<()> {
738         self.add_fd_offset(offset, size, fd, 0)
739     }
740 
741     /// Maps `size` bytes starting at `fs_offset` bytes from within the given `fd`
742     /// at `offset` bytes from the start of the arena. `offset` must be page aligned.
743     ///
744     /// # Arguments
745     /// * `offset` - Page aligned offset into the arena in bytes.
746     /// * `size` - Size of memory region in bytes.
747     /// * `fd` - File descriptor to mmap from.
748     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
749     pub fn add_fd_offset(
750         &mut self,
751         offset: usize,
752         size: usize,
753         fd: &dyn AsRawDescriptor,
754         fd_offset: u64,
755     ) -> Result<()> {
756         self.add_fd_offset_protection(offset, size, fd, fd_offset, Protection::read_write())
757     }
758 
759     /// Maps `size` bytes starting at `fs_offset` bytes from within the given `fd`
760     /// at `offset` bytes from the start of the arena with `prot` protections.
761     /// `offset` must be page aligned.
762     ///
763     /// # Arguments
764     /// * `offset` - Page aligned offset into the arena in bytes.
765     /// * `size` - Size of memory region in bytes.
766     /// * `fd` - File descriptor to mmap from.
767     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
768     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
769     pub fn add_fd_offset_protection(
770         &mut self,
771         offset: usize,
772         size: usize,
773         fd: &dyn AsRawDescriptor,
774         fd_offset: u64,
775         prot: Protection,
776     ) -> Result<()> {
777         self.try_add(offset, size, prot, Some((fd, fd_offset)))
778     }
779 
780     /// Helper method that calls appropriate MemoryMapping constructor and adds
781     /// the resulting map into the arena.
782     fn try_add(
783         &mut self,
784         offset: usize,
785         size: usize,
786         prot: Protection,
787         fd: Option<(&dyn AsRawDescriptor, u64)>,
788     ) -> Result<()> {
789         // Ensure offset is page-aligned
790         if offset % pagesize() != 0 {
791             return Err(Error::NotPageAligned);
792         }
793         validate_includes_range(self.size(), offset, size)?;
794 
795         // This is safe since the range has been validated.
796         let mmap = unsafe {
797             match fd {
798                 Some((fd, fd_offset)) => MemoryMapping::from_descriptor_offset_protection_fixed(
799                     self.addr.add(offset),
800                     fd,
801                     size,
802                     fd_offset,
803                     prot,
804                 )?,
805                 None => MemoryMapping::new_protection_fixed(self.addr.add(offset), size, prot)?,
806             }
807         };
808 
809         // This mapping will get automatically removed when we drop the whole arena.
810         std::mem::forget(mmap);
811         Ok(())
812     }
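    // Because every sub-mapping is created with MAP_FIXED inside the reserved arena, adding
    // a range that overlaps an existing sub-mapping simply replaces the overlapped pages
    // (see `arena_add_overlapping` in the tests). A short sketch:
    //
    // ```
    // let mut arena = MemoryMappingArena::new(pagesize() * 4).unwrap();
    // arena.add_anon(0, pagesize() * 2).unwrap();
    // arena.add_anon(pagesize(), pagesize() * 2).unwrap(); // overlays the second page
    // ```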
813 
814     /// Removes `size` bytes at `offset` bytes from the start of the arena. `offset` must be page
815     /// aligned.
816     ///
817     /// # Arguments
818     /// * `offset` - Page aligned offset into the arena in bytes.
819     /// * `size` - Size of memory region in bytes.
820     pub fn remove(&mut self, offset: usize, size: usize) -> Result<()> {
821         self.try_add(offset, size, Protection::read(), None)
822     }
823 }
824 
825 // Safe because the pointer and size point to a memory range owned by this MemoryMappingArena that
826 // won't be unmapped until it's Dropped.
827 unsafe impl MappedRegion for MemoryMappingArena {
828     fn as_ptr(&self) -> *mut u8 {
829         self.addr
830     }
831 
832     fn size(&self) -> usize {
833         self.size
834     }
835 
836     fn add_fd_mapping(
837         &mut self,
838         offset: usize,
839         size: usize,
840         fd: &dyn AsRawDescriptor,
841         fd_offset: u64,
842         prot: Protection,
843     ) -> Result<()> {
844         self.add_fd_offset_protection(offset, size, fd, fd_offset, prot)
845     }
846 
847     fn remove_mapping(&mut self, offset: usize, size: usize) -> Result<()> {
848         self.remove(offset, size)
849     }
850 }
851 
852 impl From<MemoryMapping> for MemoryMappingArena {
853     fn from(mmap: MemoryMapping) -> Self {
854         let addr = mmap.as_ptr();
855         let size = mmap.size();
856 
857         // Forget the original mapping because the `MemoryMappingArena` will take care of calling
858         // `munmap` when it is dropped.
859         std::mem::forget(mmap);
860         MemoryMappingArena { addr, size }
861     }
862 }
863 
864 impl From<CrateMemoryMapping> for MemoryMappingArena {
865     fn from(mmap: CrateMemoryMapping) -> Self {
866         MemoryMappingArena::from(mmap.mapping)
867     }
868 }
869 
870 impl Drop for MemoryMappingArena {
871     fn drop(&mut self) {
872         // This is safe because we own this memory range, and nobody else is holding a reference to
873         // it.
874         unsafe {
875             libc::munmap(self.addr as *mut libc::c_void, self.size);
876         }
877     }
878 }
879 
880 impl CrateMemoryMapping {
881     pub fn use_dontfork(&self) -> Result<()> {
882         self.mapping.use_dontfork()
883     }
884 
885     pub fn use_hugepages(&self) -> Result<()> {
886         self.mapping.use_hugepages()
887     }
888 
889     pub fn read_to_memory(
890         &self,
891         mem_offset: usize,
892         src: &dyn AsRawDescriptor,
893         count: usize,
894     ) -> Result<()> {
895         self.mapping.read_to_memory(mem_offset, src, count)
896     }
897 
898     pub fn write_from_memory(
899         &self,
900         mem_offset: usize,
901         dst: &dyn AsRawDescriptor,
902         count: usize,
903     ) -> Result<()> {
904         self.mapping.write_from_memory(mem_offset, dst, count)
905     }
906 
907     pub fn from_raw_ptr(addr: RawDescriptor, size: usize) -> Result<CrateMemoryMapping> {
908         MemoryMapping::from_fd_offset(&Descriptor(addr), size, 0).map(|mapping| {
909             CrateMemoryMapping {
910                 mapping,
911                 _file_descriptor: None,
912             }
913         })
914     }
915 }
916 
917 pub trait Unix {
918     /// Remove the specified range from the mapping.
919     fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()>;
920     /// Tell the kernel to readahead the range.
921     fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()>;
922     /// Tell the kernel to drop the page cache.
923     fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()>;
924     /// Lock the resident pages in the range not to be swapped out.
925     fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()>;
926     /// Unlock the range of pages.
927     fn unlock(&self, mem_offset: usize, count: usize) -> Result<()>;
928     /// Disable host swap for this mapping.
929     fn lock_all(&self) -> Result<()>;
930 }
931 
932 impl Unix for CrateMemoryMapping {
933     fn remove_range(&self, mem_offset: usize, count: usize) -> Result<()> {
934         self.mapping.remove_range(mem_offset, count)
935     }
936     fn async_prefetch(&self, mem_offset: usize, count: usize) -> Result<()> {
937         self.mapping.async_prefetch(mem_offset, count)
938     }
939     fn drop_page_cache(&self, mem_offset: usize, count: usize) -> Result<()> {
940         self.mapping.drop_page_cache(mem_offset, count)
941     }
942     fn lock_on_fault(&self, mem_offset: usize, count: usize) -> Result<()> {
943         self.mapping.lock_on_fault(mem_offset, count)
944     }
945     fn unlock(&self, mem_offset: usize, count: usize) -> Result<()> {
946         self.mapping.unlock(mem_offset, count)
947     }
948     fn lock_all(&self) -> Result<()> {
949         self.mapping.lock_on_fault(0, self.mapping.size())
950     }
951 }
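// A hedged sketch of reaching these helpers through the `Unix` extension trait on the
// cross-platform mapping type (the exact re-export path of the trait is an assumption):
//
// ```
// let mapping = MemoryMappingBuilder::new(pagesize()).build().unwrap();
// mapping.async_prefetch(0, pagesize()).unwrap();
// mapping.remove_range(0, pagesize()).unwrap();
// ```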
952 
953 pub trait MemoryMappingBuilderUnix<'a> {
954     #[allow(clippy::wrong_self_convention)]
955     fn from_descriptor(self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder;
956 }
957 
958 impl<'a> MemoryMappingBuilderUnix<'a> for MemoryMappingBuilder<'a> {
959     /// Build the memory mapping given the specified descriptor to mapped memory
960     ///
961     /// Default: Create a new memory mapping.
962     #[allow(clippy::wrong_self_convention)]
963     fn from_descriptor(mut self, descriptor: &'a dyn AsRawDescriptor) -> MemoryMappingBuilder {
964         self.descriptor = Some(descriptor);
965         self
966     }
967 }
968 
969 impl<'a> MemoryMappingBuilder<'a> {
970     /// Request that the mapped pages are pre-populated
971     ///
972     /// Default: Do not populate
973     pub fn populate(mut self) -> MemoryMappingBuilder<'a> {
974         self.populate = true;
975         self
976     }
977 
978     /// Build a MemoryMapping from the provided options.
979     pub fn build(self) -> Result<CrateMemoryMapping> {
980         match self.descriptor {
981             None => {
982                 if self.populate {
983                     // Population not supported for new mmaps
984                     return Err(Error::InvalidArgument);
985                 }
986                 MemoryMappingBuilder::wrap(
987                     MemoryMapping::new_protection(
988                         self.size,
989                         self.protection.unwrap_or_else(Protection::read_write),
990                     )?,
991                     None,
992                 )
993             }
994             Some(descriptor) => MemoryMappingBuilder::wrap(
995                 MemoryMapping::from_fd_offset_protection_populate(
996                     descriptor,
997                     self.size,
998                     self.offset.unwrap_or(0),
999                     self.protection.unwrap_or_else(Protection::read_write),
1000                     self.populate,
1001                 )?,
1002                 None,
1003             ),
1004         }
1005     }
1006 
1007     pub(crate) fn wrap(
1008         mapping: MemoryMapping,
1009         file_descriptor: Option<&'a dyn AsRawDescriptor>,
1010     ) -> Result<CrateMemoryMapping> {
1011         let file_descriptor = match file_descriptor {
1012             Some(descriptor) => Some(
1013                 SafeDescriptor::try_from(descriptor)
1014                     .map_err(|_| Error::SystemCallFailed(ErrnoError::last()))?,
1015             ),
1016             None => None,
1017         };
1018         Ok(CrateMemoryMapping {
1019             mapping,
1020             _file_descriptor: file_descriptor,
1021         })
1022     }
1023 }
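// A minimal sketch of the two builder paths handled by `build()` above; the
// descriptor-backed case uses `tempfile`, as the tests below do:
//
// ```
// // Anonymous mapping.
// let anon = MemoryMappingBuilder::new(pagesize()).build().unwrap();
//
// // Descriptor-backed mapping with pre-populated pages.
// let file = tempfile::tempfile().unwrap();
// file.set_len(pagesize() as u64).unwrap();
// let mapped = MemoryMappingBuilder::new(pagesize())
//     .from_descriptor(&file)
//     .populate()
//     .build()
//     .unwrap();
// ```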
1024 
1025 #[cfg(test)]
1026 mod tests {
1027     use data_model::VolatileMemory;
1028     use data_model::VolatileMemoryError;
1029     use tempfile::tempfile;
1030 
1031     use super::*;
1032     use crate::descriptor::Descriptor;
1033 
1034     #[test]
1035     fn basic_map() {
1036         let m = MemoryMappingBuilder::new(1024).build().unwrap();
1037         assert_eq!(1024, m.size());
1038     }
1039 
1040     #[test]
1041     fn map_invalid_size() {
1042         let res = MemoryMappingBuilder::new(0).build().unwrap_err();
1043         if let Error::SystemCallFailed(e) = res {
1044             assert_eq!(e.errno(), libc::EINVAL);
1045         } else {
1046             panic!("unexpected error: {}", res);
1047         }
1048     }
1049 
1050     #[test]
1051     fn map_invalid_fd() {
1052         let fd = Descriptor(-1);
1053         let res = MemoryMapping::from_fd(&fd, 1024).unwrap_err();
1054         if let Error::SystemCallFailed(e) = res {
1055             assert_eq!(e.errno(), libc::EBADF);
1056         } else {
1057             panic!("unexpected error: {}", res);
1058         }
1059     }
1060 
1061     #[test]
1062     fn test_write_past_end() {
1063         let m = MemoryMappingBuilder::new(5).build().unwrap();
1064         let res = m.write_slice(&[1, 2, 3, 4, 5, 6], 0);
1065         assert!(res.is_ok());
1066         assert_eq!(res.unwrap(), 5);
1067     }
1068 
1069     #[test]
1070     fn slice_size() {
1071         let m = MemoryMappingBuilder::new(5).build().unwrap();
1072         let s = m.get_slice(2, 3).unwrap();
1073         assert_eq!(s.size(), 3);
1074     }
1075 
1076     #[test]
1077     fn slice_addr() {
1078         let m = MemoryMappingBuilder::new(5).build().unwrap();
1079         let s = m.get_slice(2, 3).unwrap();
1080         assert_eq!(s.as_ptr(), unsafe { m.as_ptr().offset(2) });
1081     }
1082 
1083     #[test]
1084     fn slice_overflow_error() {
1085         let m = MemoryMappingBuilder::new(5).build().unwrap();
1086         let res = m.get_slice(std::usize::MAX, 3).unwrap_err();
1087         assert_eq!(
1088             res,
1089             VolatileMemoryError::Overflow {
1090                 base: std::usize::MAX,
1091                 offset: 3,
1092             }
1093         );
1094     }
1095     #[test]
1096     fn slice_oob_error() {
1097         let m = MemoryMappingBuilder::new(5).build().unwrap();
1098         let res = m.get_slice(3, 3).unwrap_err();
1099         assert_eq!(res, VolatileMemoryError::OutOfBounds { addr: 6 });
1100     }
1101 
1102     #[test]
1103     fn from_fd_offset_invalid() {
1104         let fd = tempfile().unwrap();
1105         let res = MemoryMapping::from_fd_offset(&fd, 4096, (libc::off64_t::max_value() as u64) + 1)
1106             .unwrap_err();
1107         match res {
1108             Error::InvalidOffset => {}
1109             e => panic!("unexpected error: {}", e),
1110         }
1111     }
1112 
1113     #[test]
1114     fn arena_new() {
1115         let m = MemoryMappingArena::new(0x40000).unwrap();
1116         assert_eq!(m.size(), 0x40000);
1117     }
1118 
1119     #[test]
1120     fn arena_add() {
1121         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1122         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1123     }
1124 
1125     #[test]
1126     fn arena_remove() {
1127         let mut m = MemoryMappingArena::new(0x40000).unwrap();
1128         assert!(m.add_anon(0, pagesize() * 4).is_ok());
1129         assert!(m.remove(0, pagesize()).is_ok());
1130         assert!(m.remove(0, pagesize() * 2).is_ok());
1131     }
1132 
1133     #[test]
1134     fn arena_add_alignment_error() {
1135         let mut m = MemoryMappingArena::new(pagesize() * 2).unwrap();
1136         assert!(m.add_anon(0, 0x100).is_ok());
1137         let res = m.add_anon(pagesize() + 1, 0x100).unwrap_err();
1138         match res {
1139             Error::NotPageAligned => {}
1140             e => panic!("unexpected error: {}", e),
1141         }
1142     }
1143 
1144     #[test]
1145     fn arena_add_oob_error() {
1146         let mut m = MemoryMappingArena::new(pagesize()).unwrap();
1147         let res = m.add_anon(0, pagesize() + 1).unwrap_err();
1148         match res {
1149             Error::InvalidAddress => {}
1150             e => panic!("unexpected error: {}", e),
1151         }
1152     }
1153 
1154     #[test]
1155     fn arena_add_overlapping() {
1156         let ps = pagesize();
1157         let mut m =
1158             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1159         m.add_anon(ps * 4, ps * 4)
1160             .expect("failed to add sub-mapping");
1161 
1162         // Overlap in the front.
1163         m.add_anon(ps * 2, ps * 3)
1164             .expect("failed to add front overlapping sub-mapping");
1165 
1166         // Overlap in the back.
1167         m.add_anon(ps * 7, ps * 3)
1168             .expect("failed to add back overlapping sub-mapping");
1169 
1170         // Overlap the back of the first mapping, all of the middle mapping, and the front of the
1171         // last mapping.
1172         m.add_anon(ps * 3, ps * 6)
1173             .expect("failed to add mapping that overlaps several mappings");
1174     }
1175 
1176     #[test]
1177     fn arena_remove_overlapping() {
1178         let ps = pagesize();
1179         let mut m =
1180             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1181         m.add_anon(ps * 4, ps * 4)
1182             .expect("failed to add sub-mapping");
1183         m.add_anon(ps * 2, ps * 2)
1184             .expect("failed to add front overlapping sub-mapping");
1185         m.add_anon(ps * 8, ps * 2)
1186             .expect("failed to add back overlapping sub-mapping");
1187 
1188         // Remove the back of the first mapping and the front of the second.
1189         m.remove(ps * 3, ps * 2)
1190             .expect("failed to remove front overlapping mapping");
1191 
1192         // Remove the back of the second mapping and the front of the third.
1193         m.remove(ps * 7, ps * 2)
1194             .expect("failed to remove back overlapping mapping");
1195 
1196         // Remove a mapping that completely overlaps the middle mapping.
1197         m.remove(ps * 5, ps * 2)
1198             .expect("failed to remove fully overlapping mapping");
1199     }
1200 
1201     #[test]
1202     fn arena_remove_unaligned() {
1203         let ps = pagesize();
1204         let mut m =
1205             MemoryMappingArena::new(12 * ps).expect("failed to create `MemoryMappingArena`");
1206 
1207         m.add_anon(0, ps).expect("failed to add mapping");
1208         m.remove(0, ps - 1)
1209             .expect("failed to remove unaligned mapping");
1210     }
1211 
1212     #[test]
1213     fn arena_msync() {
1214         let size = 0x40000;
1215         let m = MemoryMappingArena::new(size).unwrap();
1216         let ps = pagesize();
1217         <dyn MappedRegion>::msync(&m, 0, ps).unwrap();
1218         <dyn MappedRegion>::msync(&m, 0, size).unwrap();
1219         <dyn MappedRegion>::msync(&m, ps, size - ps).unwrap();
1220         let res = <dyn MappedRegion>::msync(&m, ps, size).unwrap_err();
1221         match res {
1222             Error::InvalidAddress => {}
1223             e => panic!("unexpected error: {}", e),
1224         }
1225     }
1226 }
1227