// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::convert::AsRef;
use std::convert::TryFrom;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::result;
use std::sync::Arc;

use anyhow::bail;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Error as SysError;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use base::SharedMemory;
use cros_async::mem;
use cros_async::BackingMemory;
use data_model::volatile_memory::*;
use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::guest_address::GuestAddress;

mod sys;
pub use sys::MemoryPolicy;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid guest address {0}")]
    InvalidGuestAddress(GuestAddress),
    #[error("invalid offset {0}")]
    InvalidOffset(u64),
    #[error("size {0} must not be zero")]
    InvalidSize(usize),
    #[error("invalid guest memory access at addr={0}: {1}")]
    MemoryAccess(GuestAddress, #[source] MmapError),
    #[error("failed to set seals on shm region: {0}")]
    MemoryAddSealsFailed(#[source] SysError),
    #[error("failed to create shm region: {0}")]
    MemoryCreationFailed(#[source] SysError),
    #[error("failed to map guest memory: {0}")]
    MemoryMappingFailed(#[source] MmapError),
    #[error("shm regions must be page aligned")]
    MemoryNotAligned,
    #[error("memory regions overlap")]
    MemoryRegionOverlap,
    #[error("memory region size {0} is too large")]
    MemoryRegionTooLarge(u128),
    #[error("incomplete read of {completed} instead of {expected} bytes")]
    ShortRead { expected: usize, completed: usize },
    #[error("incomplete write of {completed} instead of {expected} bytes")]
    ShortWrite { expected: usize, completed: usize },
    #[error("DescriptorChain split is out of bounds: {0}")]
    SplitOutOfBounds(usize),
    #[error("{0}")]
    VolatileMemoryAccess(#[source] VolatileMemoryError),
}

pub type Result<T> = result::Result<T, Error>;

/// A file-like object backing `MemoryRegion`.
#[derive(Clone, Debug)]
pub enum BackingObject {
    Shm(Arc<SharedMemory>),
    File(Arc<File>),
}

impl AsRawDescriptor for BackingObject {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        match self {
            Self::Shm(shm) => shm.as_raw_descriptor(),
            Self::File(f) => f.as_raw_descriptor(),
        }
    }
}

impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
        match self {
            BackingObject::Shm(shm) => shm.as_ref(),
            BackingObject::File(f) => f.as_ref(),
        }
    }
}

/// For GuestMemory::with_regions
pub struct MemoryRegionInformation<'a> {
    pub index: usize,
    pub guest_addr: GuestAddress,
    pub size: usize,
    pub host_addr: usize,
    pub shm: &'a BackingObject,
    pub shm_offset: u64,
    pub options: MemoryRegionOptions,
}

#[sorted]
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub enum MemoryRegionPurpose {
    // General purpose guest memory
    #[default]
    GuestMemoryRegion,
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ProtectedFirmwareRegion,
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    StaticSwiotlbRegion,
}

#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub struct MemoryRegionOptions {
    /// Some hypervisors (presently: Gunyah) need explicit knowledge about
    /// which memory region is used for protected firmware, static swiotlb,
    /// or general purpose guest memory.
    pub purpose: MemoryRegionPurpose,
}

impl MemoryRegionOptions {
    pub fn new() -> MemoryRegionOptions {
        Default::default()
    }

    pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
        self.purpose = purpose;
        self
    }
}

/// A region of memory-mapped memory.
/// Holds the memory mapping with its offset in guest memory.
/// Also holds the backing object for the mapping and the offset in that object of the mapping.
#[derive(Debug)]
pub struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,

    shared_obj: BackingObject,
    obj_offset: u64,

    options: MemoryRegionOptions,
}

impl MemoryRegion {
    /// Creates a new MemoryRegion using the given SharedMemory object to later be attached to a VM
    /// at `guest_base` address in the guest.
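    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a page-aligned `size` and a freshly created `SharedMemory`:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::SharedMemory;
    /// # use vm_memory::guest_memory::MemoryRegion;
    /// # use vm_memory::GuestAddress;
    /// # fn test_new_from_shm() -> Result<(), ()> {
    ///     let shm = Arc::new(SharedMemory::new("test", 0x10000).map_err(|_| ())?);
    ///     let _region = MemoryRegion::new_from_shm(0x10000, GuestAddress(0x10000), 0, shm)
    ///         .map_err(|_| ())?;
    ///     Ok(())
    /// # }
    /// ```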
    pub fn new_from_shm(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        shm: Arc<SharedMemory>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_shared_memory(shm.as_ref())
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::Shm(shm),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    /// Creates a new MemoryRegion backed by the given file, to be mapped later at the `guest_base`
    /// address in the guest.
    pub fn new_from_file(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        file: Arc<File>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_file(&file)
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::File(file),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }
}

/// Tracks memory regions and where they are mapped in the guest, along with shm
/// descriptors of the underlying memory regions.
#[derive(Clone, Debug)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
}

impl AsRawDescriptors for GuestMemory {
    /// USE WITH CAUTION, the descriptors returned here are not necessarily
    /// files!
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shared_obj.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
    /// Creates backing shm for GuestMemory regions
    fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned);
            }

            aligned_size += range.1;
        }

        // NOTE: Some tests rely on the GuestMemory's name when capturing metrics.
        let name = "crosvm_guest";
        // Shm must be mut even though it is only updated on Unix systems.
        #[allow(unused_mut)]
        let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;

        sys::finalize_shm(&mut shm)?;

        Ok(shm)
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size, MemoryRegionOptions)
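    ///
    /// # Examples
    ///
    /// A minimal sketch creating two general purpose regions (sizes are assumed page aligned):
    ///
    /// ```
    /// # use vm_memory::guest_memory::MemoryRegionOptions;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_new_with_options() -> Result<(), ()> {
    ///     let gm = GuestMemory::new_with_options(&[
    ///         (GuestAddress(0x0), 0x10000, MemoryRegionOptions::new()),
    ///         (GuestAddress(0x10000), 0x10000, MemoryRegionOptions::new()),
    ///     ])
    ///     .map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x20000);
    ///     Ok(())
    /// # }
    /// ```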
    pub fn new_with_options(
        ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
    ) -> Result<GuestMemory> {
        // Create shm
        let shm = Arc::new(GuestMemory::create_shm(ranges)?);

        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size = usize::try_from(range.1)
                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
            let mapping = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm.as_ref())
                .offset(offset)
                .build()
                .map_err(Error::MemoryMappingFailed)?;

            regions.push(MemoryRegion {
                mapping,
                guest_base: range.0,
                shared_obj: BackingObject::Shm(shm.clone()),
                obj_offset: offset,
                options: range.2,
            });

            offset += size as u64;
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        GuestMemory::new_with_options(
            ranges
                .iter()
                .map(|(addr, size)| (*addr, *size, Default::default()))
                .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
                .as_slice(),
        )
    }

    /// Creates a `GuestMemory` from a collection of MemoryRegions.
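    ///
    /// # Examples
    ///
    /// A minimal sketch, assuming a page-aligned region backed by a `SharedMemory` object:
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::SharedMemory;
    /// # use vm_memory::guest_memory::MemoryRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_from_regions() -> Result<(), ()> {
    ///     let shm = Arc::new(SharedMemory::new("test", 0x10000).map_err(|_| ())?);
    ///     let region = MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm).map_err(|_| ())?;
    ///     let gm = GuestMemory::from_regions(vec![region]).map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x10000);
    ///     Ok(())
    /// # }
    /// ```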
    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
        // Sort the regions and ensure non overlap.
        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));

        if regions.len() > 1 {
            let mut prev_end = regions[0]
                .guest_base
                .checked_add(regions[0].mapping.size() as u64)
                .ok_or(Error::MemoryRegionOverlap)?;
            for region in &regions[1..] {
                if prev_end > region.guest_base {
                    return Err(Error::MemoryRegionOverlap);
                }
                prev_end = region
                    .guest_base
                    .checked_add(region.mapping.size() as u64)
                    .ok_or(Error::MemoryRegionTooLarge(
                        region.guest_base.0 as u128 + region.mapping.size() as u128,
                    ))?;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    /// Returns the guest addresses and sizes of the memory regions.
    pub fn guest_memory_regions(&self) -> Vec<(GuestAddress, usize)> {
        self.regions
            .iter()
            .map(|region| (region.guest_base, region.mapping.size()))
            .collect()
    }

    /// Returns the total size of memory in bytes.
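    ///
    /// # Examples
    ///
    /// A minimal sketch summing two 64 KiB regions:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_memory_size() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)])
    ///         .map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x20000);
    ///     Ok(())
    /// # }
    /// ```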
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    /// Returns true if the given range (start, end) overlaps with the memory range
    /// available to the guest.
    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    /// Returns an address `addr + offset` if it's in range.
    ///
    /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
    /// guarantee it's a valid range, use `is_valid_range()` instead.
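    ///
    /// # Examples
    ///
    /// A minimal sketch with a hole between two regions:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_checked_offset() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x40000), 0x10000)])
    ///         .map_err(|_| ())?;
    ///     assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x1000), Some(GuestAddress(0x2000)));
    ///     // 0x1000 + 0x1f000 = 0x20000 falls into the hole between the regions.
    ///     assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x1f000), None);
    ///     Ok(())
    /// # }
    /// ```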
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
    /// range available to the guest and it's backed by a single underlying memory region.
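    ///
    /// # Examples
    ///
    /// A minimal sketch with a single 64 KiB region:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_is_valid_range() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
    ///     assert!(!gm.is_valid_range(GuestAddress(0x8000), 0x10000));
    ///     Ok(())
    /// # }
    /// ```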
    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
        if length == 0 {
            return false;
        }

        let end = if let Some(end) = start.checked_add(length - 1) {
            end
        } else {
            return false;
        };

        self.regions
            .iter()
            .any(|region| region.start() <= start && end < region.end())
    }

    /// Returns the number of memory regions.
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    /// Perform the specified action on each region's addresses.
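    ///
    /// # Examples
    ///
    /// A minimal sketch that sums region sizes through the callback:
    ///
    /// ```
    /// # use vm_memory::guest_memory::MemoryRegionInformation;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_with_regions() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    ///     let mut total = 0;
    ///     gm.with_regions::<_, ()>(|MemoryRegionInformation { size, .. }| {
    ///         total += size;
    ///         Ok(())
    ///     })?;
    ///     assert_eq!(total as u64, gm.memory_size());
    ///     Ok(())
    /// # }
    /// ```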
    pub fn with_regions<F, E>(&self, mut cb: F) -> result::Result<(), E>
    where
        F: FnMut(MemoryRegionInformation) -> result::Result<(), E>,
    {
        for (index, region) in self.regions.iter().enumerate() {
            cb(MemoryRegionInformation {
                index,
                guest_addr: region.start(),
                size: region.mapping.size(),
                host_addr: region.mapping.as_ptr() as usize,
                shm: &region.shared_obj,
                shm_offset: region.obj_offset,
                options: region.options,
            })?;
        }
        Ok(())
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(5, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads to a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let buf = &mut [0u8; 16];
    ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(16, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`GuestMemory::read_obj_from_addr`].
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr_volatile(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr_volatile(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj_volatile(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: AsBytes>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`GuestMemory::write_obj_at_addr`].
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr_volatile(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr_volatile<T: AsBytes>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj_volatile(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
    /// is not a subset of this `GuestMemory`.
    ///
    /// # Examples
    /// * Write `99` to 30 bytes starting at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
    ///     vslice.write_bytes(99);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                // The cast to a usize is safe here because we know that `region.contains(addr)` and
                // it's not possible for a memory region to be larger than what fits in a usize.
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    /// Reads data from a file descriptor and writes it to guest memory.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
    ///       let read_addr = addr.checked_add(8).ok_or(())?;
    ///       let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
    /// #     Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory<F: Read + AsRawDescriptor>(
        &self,
        guest_addr: GuestAddress,
        src: &mut F,
        count: usize,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_to_memory(offset, src, count)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes data from memory to a file descriptor.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Read `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/null")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// #     Ok(())
    /// # }
    /// ```
    pub fn write_from_memory<F: Write + AsRawDescriptor>(
        &self,
        guest_addr: GuestAddress,
        dst: &mut F,
        count: usize,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_from_memory(offset, dst, count)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_from_memory`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        // This is safe; `find_region` already checks that offset is in
        // bounds.
        Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process, and verify that the provided size defines a valid range within
    /// a single memory region. Similar to get_host_address(), this should only
    /// be used for giving addresses to the kernel.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    /// * `size` - Size of the address range to be converted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address_range(
        &self,
        guest_addr: GuestAddress,
        size: usize,
    ) -> Result<*const u8> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        // Assume no overlap among regions
        let (mapping, offset, _) = self.find_region(guest_addr)?;

        if mapping
            .size()
            .checked_sub(offset)
            .map_or(true, |v| v < size)
        {
            return Err(Error::InvalidGuestAddress(guest_addr));
        }

        // This is safe; `find_region` already checks that offset is in
        // bounds.
        Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
    }

    /// Returns a reference to the backing object of the region that contains the given address.
    pub fn shm_region(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shared_obj.as_ref())
    }

    /// Returns the region that contains the memory at `offset` from the base of guest memory.
    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

    /// Loops over all guest memory regions of `self`, and returns the
    /// target region that contains `guest_addr`. On success, this
    /// function returns a tuple with the following fields:
    ///
    /// (i) the memory mapping associated with the target region.
    /// (ii) the relative offset from the start of the target region to `guest_addr`.
    /// (iii) the absolute offset from the start of the memory mapping to the target region.
    ///
    /// If no target region is found, an error is returned.
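    ///
    /// # Examples
    ///
    /// A minimal sketch locating the mapping that backs a guest address:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_find_region() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).map_err(|_| ())?;
    ///     let (_mapping, offset, obj_offset) = gm.find_region(GuestAddress(0x10200)).map_err(|_| ())?;
    ///     assert_eq!(offset, 0x200);
    ///     assert_eq!(obj_offset, 0);
    ///     Ok(())
    /// # }
    /// ```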
    pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.obj_offset,
                )
            })
    }

    /// Convert a GuestAddress into an offset within the associated shm region.
    ///
    /// Due to potential gaps within GuestMemory, it is helpful to know the
    /// offset within the shm where a given address is found. This offset
    /// can then be passed to another process mapping the shm to read data
    /// starting at that address.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let addr_a = GuestAddress(0x10000);
    /// let addr_b = GuestAddress(0x80000);
    /// let mut gm = GuestMemory::new(&vec![
    ///     (addr_a, 0x20000),
    ///     (addr_b, 0x30000)]).expect("failed to create GuestMemory");
    /// let offset = gm.offset_from_base(GuestAddress(0x95000))
    ///                .expect("failed to get offset");
    /// assert_eq!(offset, 0x35000);
    /// ```
    pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.obj_offset + guest_addr.offset_from(region.start()))
    }

    /// Copy all guest memory into `w`.
    ///
    /// Assumes exclusive access to the guest memory for the duration of the call (e.g. all vCPUs
    /// and devices must be stopped).
    ///
    /// Returns a JSON object that contains metadata about the underlying memory regions to allow
    /// validation checks at restore time.
    pub fn snapshot(&self, w: &mut std::fs::File) -> anyhow::Result<serde_json::Value> {
        let mut metadata = MemorySnapshotMetadata {
            regions: Vec::new(),
        };

        for region in self.regions.iter() {
            metadata
                .regions
                .push((region.guest_base.0, region.mapping.size()));
            self.write_from_memory(region.guest_base, w, region.mapping.size())?;
        }

        Ok(serde_json::to_value(metadata)?)
    }

    /// Restore the guest memory using the bytes from `r`.
    ///
    /// Assumes exclusive access to the guest memory for the duration of the call (e.g. all vCPUs
    /// and devices must be stopped).
    ///
    /// Returns an error if `metadata` doesn't match the configuration of the `GuestMemory` or if
    /// `r` doesn't produce exactly as many bytes as needed.
    pub fn restore(
        &self,
        metadata: serde_json::Value,
        r: &mut std::fs::File,
    ) -> anyhow::Result<()> {
        let metadata: MemorySnapshotMetadata = serde_json::from_value(metadata)?;
        if self.regions.len() != metadata.regions.len() {
            bail!(
                "snapshot expected {} memory regions but VM has {}",
                metadata.regions.len(),
                self.regions.len()
            );
        }
        for (region, (guest_base, size)) in self.regions.iter().zip(metadata.regions.iter()) {
            if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
                bail!("snapshot memory regions don't match VM memory regions");
            }
        }

        for region in self.regions.iter() {
            self.read_to_memory(region.guest_base, r, region.mapping.size())?;
        }

        // Should always be at EOF at this point.
        let mut buf = [0];
        if r.read(&mut buf)? != 0 {
            bail!("too many bytes");
        }

        Ok(())
    }
}

// TODO: Consider storing a hash of memory contents and validating it on restore.
#[derive(serde::Serialize, serde::Deserialize)]
struct MemorySnapshotMetadata {
    // Guest base and size for each memory region.
    regions: Vec<(u64, usize)>,
}

// It is safe to implement BackingMemory because GuestMemory can be mutated any time already.
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset as u64), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        // The memory regions are `[0x0, 0x10000)`, `[0x10000, 0x20000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        // Although each address in `[0x0, 0x20000)` is valid, `is_valid_range()` returns false for
        // a range that is across multiple underlying regions.
        assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
        assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
        assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x40000);
        // The memory regions are `[0x0, 0x20000)`, `[0x40000, 0x60000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();

        assert!(gm.address_in_range(GuestAddress(0x10000)));
        assert!(!gm.address_in_range(GuestAddress(0x30000)));
        assert!(gm.address_in_range(GuestAddress(0x50000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)),);
        assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)),);
        assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)),);
        assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
        assert_eq!(
            gm.checked_offset(GuestAddress(0x50000), 0x8000),
            Some(GuestAddress(0x58000))
        );
        assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
        assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));

        // While `checked_offset(GuestAddress(0x10000), 0x40000)` succeeds because 0x50000 is a
        // valid address, `is_valid_range(GuestAddress(0x10000), 0x40000)` returns `false`
        // because there is a hole inside of [0x10000, 0x50000).
        assert_eq!(
            gm.checked_offset(GuestAddress(0x10000), 0x40000),
            Some(GuestAddress(0x50000))
        );
        assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn guest_to_host_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
        let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
        assert_eq!(host_addr3, addr2_base);

        // Check that a valid guest address with an invalid size returns an error.
        assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
    }

    #[test]
    fn shm_offset() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        let _ = gm.with_regions::<_, ()>(
            |MemoryRegionInformation {
                 index,
                 size,
                 shm: obj,
                 shm_offset: offset,
                 ..
             }| {
                let shm = match obj {
                    BackingObject::Shm(s) => s,
                    _ => {
                        panic!("backing object isn't SharedMemory");
                    }
                };
                let mmap = MemoryMappingBuilder::new(size)
                    .from_shared_memory(shm)
                    .offset(offset)
                    .build()
                    .unwrap();

                if index == 0 {
                    assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
                }

                if index == 1 {
                    assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
                }

                Ok(())
            },
        );
    }
}