1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Track memory regions that are mapped to the guest VM.
6 
7 use std::convert::AsRef;
8 use std::convert::TryFrom;
9 use std::fs::File;
10 use std::io::Read;
11 use std::io::Write;
12 use std::marker::Send;
13 use std::marker::Sync;
14 use std::result;
15 use std::sync::Arc;
16 
17 use anyhow::bail;
18 use anyhow::Context;
19 use base::pagesize;
20 use base::AsRawDescriptor;
21 use base::AsRawDescriptors;
22 use base::Error as SysError;
23 use base::MappedRegion;
24 use base::MemoryMapping;
25 use base::MemoryMappingBuilder;
26 use base::MmapError;
27 use base::RawDescriptor;
28 use base::SharedMemory;
29 use base::VolatileMemory;
30 use base::VolatileMemoryError;
31 use base::VolatileSlice;
32 use cros_async::mem;
33 use cros_async::BackingMemory;
34 use remain::sorted;
35 use thiserror::Error;
36 use zerocopy::AsBytes;
37 use zerocopy::FromBytes;
38 
39 use crate::guest_address::GuestAddress;
40 
41 mod sys;
42 pub use sys::MemoryPolicy;
43 
44 #[sorted]
45 #[derive(Error, Debug)]
46 pub enum Error {
47     #[error("invalid guest address {0}")]
48     InvalidGuestAddress(GuestAddress),
49     #[error("invalid offset {0}")]
50     InvalidOffset(u64),
51     #[error("size {0} must not be zero")]
52     InvalidSize(usize),
53     #[error("invalid guest memory access at addr={0}: {1}")]
54     MemoryAccess(GuestAddress, #[source] MmapError),
55     #[error("failed to set seals on shm region: {0}")]
56     MemoryAddSealsFailed(#[source] SysError),
57     #[error("failed to create shm region: {0}")]
58     MemoryCreationFailed(#[source] SysError),
59     #[error("failed to map guest memory: {0}")]
60     MemoryMappingFailed(#[source] MmapError),
61     #[error("shm regions must be page aligned")]
62     MemoryNotAligned,
63     #[error("memory regions overlap")]
64     MemoryRegionOverlap,
65     #[error("memory region size {0} is too large")]
66     MemoryRegionTooLarge(u128),
67     #[error("incomplete read of {completed} instead of {expected} bytes")]
68     ShortRead { expected: usize, completed: usize },
69     #[error("incomplete write of {completed} instead of {expected} bytes")]
70     ShortWrite { expected: usize, completed: usize },
71     #[error("DescriptorChain split is out of bounds: {0}")]
72     SplitOutOfBounds(usize),
73     #[error("{0}")]
74     VolatileMemoryAccess(#[source] VolatileMemoryError),
75 }
76 
77 pub type Result<T> = result::Result<T, Error>;
78 
79 /// A file-like object backing `MemoryRegion`.
80 #[derive(Clone, Debug)]
81 pub enum BackingObject {
82     Shm(Arc<SharedMemory>),
83     File(Arc<File>),
84 }
85 
86 impl AsRawDescriptor for BackingObject {
87     fn as_raw_descriptor(&self) -> RawDescriptor {
88         match self {
89             Self::Shm(shm) => shm.as_raw_descriptor(),
90             Self::File(f) => f.as_raw_descriptor(),
91         }
92     }
93 }
94 
95 impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
96     fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
97         match self {
98             BackingObject::Shm(shm) => shm.as_ref(),
99             BackingObject::File(f) => f.as_ref(),
100         }
101     }
102 }
103 
104 /// For GuestMemory::regions
105 pub struct MemoryRegionInformation<'a> {
106     pub index: usize,
107     pub guest_addr: GuestAddress,
108     pub size: usize,
109     pub host_addr: usize,
110     pub shm: &'a BackingObject,
111     pub shm_offset: u64,
112     pub options: MemoryRegionOptions,
113 }
114 
115 #[sorted]
116 #[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
117 pub enum MemoryRegionPurpose {
118     // General purpose guest memory
119     #[default]
120     GuestMemoryRegion,
121     #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
122     ProtectedFirmwareRegion,
123     #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
124     StaticSwiotlbRegion,
125 }
126 
127 #[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
128 pub struct MemoryRegionOptions {
129     /// Some hypervisors (presently: Gunyah) need explicit knowledge about
130     /// which memory region is used for protected firmware, static swiotlb,
131     /// or general purpose guest memory.
132     pub purpose: MemoryRegionPurpose,
133     /// Alignment for the mapping of this region. This is intended to be used for
134     /// arm64 KVM support, where block alignment is required for transparent
135     /// huge-page support.
136     pub align: u64,
137 }
138 
139 impl MemoryRegionOptions {
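    /// Creates a default set of options.
    ///
    /// A minimal sketch of the builder methods below; the alignment value is illustrative only:
    ///
    /// ```
    /// # use vm_memory::guest_memory::{MemoryRegionOptions, MemoryRegionPurpose};
    /// let opts = MemoryRegionOptions::new()
    ///     .purpose(MemoryRegionPurpose::GuestMemoryRegion)
    ///     .align(2 * 1024 * 1024); // e.g. a 2 MiB huge-page boundary
    /// assert_eq!(opts.align, 2 * 1024 * 1024);
    /// ```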
140     pub fn new() -> MemoryRegionOptions {
141         Default::default()
142     }
143 
144     pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
145         self.purpose = purpose;
146         self
147     }
148 
149     pub fn align(mut self, alignment: u64) -> Self {
150         self.align = alignment;
151         self
152     }
153 }
154 
155 /// A region of mapped memory.
156 /// Holds the memory mapping with its offset in guest memory.
157 /// Also holds the backing object for the mapping and the mapping's offset within that object.
158 #[derive(Debug)]
159 pub struct MemoryRegion {
160     mapping: MemoryMapping,
161     guest_base: GuestAddress,
162 
163     shared_obj: BackingObject,
164     obj_offset: u64,
165 
166     options: MemoryRegionOptions,
167 }
168 
169 impl MemoryRegion {
170     /// Creates a new MemoryRegion using the given SharedMemory object, to be attached later to
171     /// a VM at the `guest_base` address in the guest.
172     pub fn new_from_shm(
173         size: u64,
174         guest_base: GuestAddress,
175         offset: u64,
176         shm: Arc<SharedMemory>,
177     ) -> Result<Self> {
178         let mapping = MemoryMappingBuilder::new(size as usize)
179             .from_shared_memory(shm.as_ref())
180             .offset(offset)
181             .build()
182             .map_err(Error::MemoryMappingFailed)?;
183         Ok(MemoryRegion {
184             mapping,
185             guest_base,
186             shared_obj: BackingObject::Shm(shm),
187             obj_offset: offset,
188             options: Default::default(),
189         })
190     }
191 
192     /// Creates a new MemoryRegion backed by the given file, to be mapped later into the guest
193     /// at the `guest_base` address.
194     pub fn new_from_file(
195         size: u64,
196         guest_base: GuestAddress,
197         offset: u64,
198         file: Arc<File>,
199     ) -> Result<Self> {
200         let mapping = MemoryMappingBuilder::new(size as usize)
201             .from_file(&file)
202             .offset(offset)
203             .build()
204             .map_err(Error::MemoryMappingFailed)?;
205         Ok(MemoryRegion {
206             mapping,
207             guest_base,
208             shared_obj: BackingObject::File(file),
209             obj_offset: offset,
210             options: Default::default(),
211         })
212     }
213 
214     fn start(&self) -> GuestAddress {
215         self.guest_base
216     }
217 
218     fn end(&self) -> GuestAddress {
219         // unchecked_add is safe as the region bounds were checked when it was created.
220         self.guest_base.unchecked_add(self.mapping.size() as u64)
221     }
222 
223     fn contains(&self, addr: GuestAddress) -> bool {
224         addr >= self.guest_base && addr < self.end()
225     }
226 }
227 
228 /// Tracks memory regions and where they are mapped in the guest, along with shm
229 /// descriptors of the underlying memory regions.
230 #[derive(Clone, Debug)]
231 pub struct GuestMemory {
232     regions: Arc<[MemoryRegion]>,
233 }
234 
235 impl AsRawDescriptors for GuestMemory {
236     /// USE WITH CAUTION, the descriptors returned here are not necessarily
237     /// files!
238     fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
239         self.regions
240             .iter()
241             .map(|r| r.shared_obj.as_raw_descriptor())
242             .collect()
243     }
244 }
245 
246 impl GuestMemory {
247     /// Creates backing shm for GuestMemory regions
248     fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
249         let mut aligned_size = 0;
250         let pg_size = pagesize();
251         for range in ranges {
252             if range.1 % pg_size as u64 != 0 {
253                 return Err(Error::MemoryNotAligned);
254             }
255 
256             aligned_size += range.1;
257         }
258 
259         // NOTE: Some tests rely on the GuestMemory's name when capturing metrics.
260         let name = "crosvm_guest";
261         // Shm must be mut even though it is only updated on Unix systems.
262         #[allow(unused_mut)]
263         let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;
264 
265         sys::finalize_shm(&mut shm)?;
266 
267         Ok(shm)
268     }
269 
270     /// Creates a container for guest memory regions.
271     /// Valid memory regions are specified as a Vec of (Address, Size, MemoryRegionOptions)
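    ///
    /// A brief, illustrative sketch (sizes must be page aligned):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use vm_memory::guest_memory::MemoryRegionOptions;
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new_with_options(&[
    ///     (GuestAddress(0x0), 0x10000, MemoryRegionOptions::new()),
    ///     (GuestAddress(0x10000), 0x10000, MemoryRegionOptions::new()),
    /// ]).map_err(|_| ())?;
    /// assert_eq!(gm.memory_size(), 0x20000);
    /// # Ok(())
    /// # }
    /// ```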
272     pub fn new_with_options(
273         ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
274     ) -> Result<GuestMemory> {
275         // Create shm
276         let shm = Arc::new(GuestMemory::create_shm(ranges)?);
277 
278         // Create memory regions
279         let mut regions = Vec::<MemoryRegion>::new();
280         let mut offset = 0;
281 
282         for range in ranges {
283             if let Some(last) = regions.last() {
284                 if last
285                     .guest_base
286                     .checked_add(last.mapping.size() as u64)
287                     .map_or(true, |a| a > range.0)
288                 {
289                     return Err(Error::MemoryRegionOverlap);
290                 }
291             }
292 
293             let size = usize::try_from(range.1)
294                 .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
295             let mapping = MemoryMappingBuilder::new(size)
296                 .from_shared_memory(shm.as_ref())
297                 .offset(offset)
298                 .align(range.2.align)
299                 .build()
300                 .map_err(Error::MemoryMappingFailed)?;
301 
302             regions.push(MemoryRegion {
303                 mapping,
304                 guest_base: range.0,
305                 shared_obj: BackingObject::Shm(shm.clone()),
306                 obj_offset: offset,
307                 options: range.2,
308             });
309 
310             offset += size as u64;
311         }
312 
313         Ok(GuestMemory {
314             regions: Arc::from(regions),
315         })
316     }
317 
318     /// Creates a container for guest memory regions.
319     /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
320     pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
321         GuestMemory::new_with_options(
322             ranges
323                 .iter()
324                 .map(|(addr, size)| (*addr, *size, Default::default()))
325                 .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
326                 .as_slice(),
327         )
328     }
329 
330     /// Creates a `GuestMemory` from a collection of MemoryRegions.
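    ///
    /// A minimal sketch building one region from shared memory (the size is illustrative and
    /// must be page aligned):
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::SharedMemory;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use vm_memory::guest_memory::MemoryRegion;
    /// # fn sketch() -> Result<(), ()> {
    /// let shm = Arc::new(SharedMemory::new("example", 0x10000).map_err(|_| ())?);
    /// let region = MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm).map_err(|_| ())?;
    /// let gm = GuestMemory::from_regions(vec![region]).map_err(|_| ())?;
    /// assert_eq!(gm.memory_size(), 0x10000);
    /// # Ok(())
    /// # }
    /// ```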
331     pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
332         // Sort the regions and ensure non overlap.
333         regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));
334 
335         if regions.len() > 1 {
336             let mut prev_end = regions[0]
337                 .guest_base
338                 .checked_add(regions[0].mapping.size() as u64)
339                 .ok_or(Error::MemoryRegionOverlap)?;
340             for region in &regions[1..] {
341                 if prev_end > region.guest_base {
342                     return Err(Error::MemoryRegionOverlap);
343                 }
344                 prev_end = region
345                     .guest_base
346                     .checked_add(region.mapping.size() as u64)
347                     .ok_or(Error::MemoryRegionTooLarge(
348                         region.guest_base.0 as u128 + region.mapping.size() as u128,
349                     ))?;
350             }
351         }
352 
353         Ok(GuestMemory {
354             regions: Arc::from(regions),
355         })
356     }
357 
358     /// Returns the end address of memory.
359     ///
360     /// # Examples
361     ///
362     /// ```
363     /// # use base::MemoryMapping;
364     /// # use vm_memory::{GuestAddress, GuestMemory};
365     /// # fn test_end_addr() -> Result<(), ()> {
366     ///     let start_addr = GuestAddress(0x1000);
367     ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
368     ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
369     ///     Ok(())
370     /// # }
371     /// ```
372     pub fn end_addr(&self) -> GuestAddress {
373         self.regions
374             .iter()
375             .max_by_key(|region| region.start())
376             .map_or(GuestAddress(0), MemoryRegion::end)
377     }
378 
379     /// Returns the guest addresses and sizes of the memory regions.
380     pub fn guest_memory_regions(&self) -> Vec<(GuestAddress, usize)> {
381         self.regions
382             .iter()
383             .map(|region| (region.guest_base, region.mapping.size()))
384             .collect()
385     }
386 
387     /// Returns the total size of memory in bytes.
388     pub fn memory_size(&self) -> u64 {
389         self.regions
390             .iter()
391             .map(|region| region.mapping.size() as u64)
392             .sum()
393     }
394 
395     /// Returns true if the given address is within the memory range available to the guest.
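    ///
    /// A short example (the region spans `[0x1000, 0x2000)`):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert!(gm.address_in_range(GuestAddress(0x1200)));
    /// assert!(!gm.address_in_range(GuestAddress(0x2000)));
    /// # Ok(())
    /// # }
    /// ```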
396     pub fn address_in_range(&self, addr: GuestAddress) -> bool {
397         self.regions.iter().any(|region| region.contains(addr))
398     }
399 
400     /// Returns true if the given range `[start, end)` overlaps with the memory range
401     /// available to the guest.
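    ///
    /// A short example (the region spans `[0x1000, 0x2000)`):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert!(gm.range_overlap(GuestAddress(0x800), GuestAddress(0x1100)));
    /// assert!(!gm.range_overlap(GuestAddress(0x2000), GuestAddress(0x3000)));
    /// # Ok(())
    /// # }
    /// ```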
402     pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
403         self.regions
404             .iter()
405             .any(|region| region.start() < end && start < region.end())
406     }
407 
408     /// Returns an address `addr + offset` if it's in range.
409     ///
410     /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
411     /// guarantee it's a valid range, use `is_valid_range()` instead.
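    ///
    /// A short example (the region spans `[0x1000, 0x2000)`):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x200), Some(GuestAddress(0x1200)));
    /// assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x1000), None);
    /// # Ok(())
    /// # }
    /// ```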
412     pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
413         addr.checked_add(offset).and_then(|a| {
414             if self.address_in_range(a) {
415                 Some(a)
416             } else {
417                 None
418             }
419         })
420     }
421 
422     /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
423     /// range available to the guest and it's backed by a single underlying memory region.
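    ///
    /// A short example (the region spans `[0x1000, 0x2000)`):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert!(gm.is_valid_range(GuestAddress(0x1000), 0x1000));
    /// assert!(!gm.is_valid_range(GuestAddress(0x1000), 0x1001));
    /// # Ok(())
    /// # }
    /// ```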
424     pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
425         if length == 0 {
426             return false;
427         }
428 
429         let end = if let Some(end) = start.checked_add(length - 1) {
430             end
431         } else {
432             return false;
433         };
434 
435         self.regions
436             .iter()
437             .any(|region| region.start() <= start && end < region.end())
438     }
439 
440     /// Returns the number of memory regions.
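    ///
    /// For example:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000), (GuestAddress(0x10000), 0x10000)]).map_err(|_| ())?;
    /// assert_eq!(gm.num_regions(), 2);
    /// # Ok(())
    /// # }
    /// ```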
441     pub fn num_regions(&self) -> u64 {
442         self.regions.len() as u64
443     }
444 
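    /// Returns an iterator over `MemoryRegionInformation` for each region, in order of guest
    /// address.
    ///
    /// A short, illustrative sketch:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)]).map_err(|_| ())?;
    /// for info in gm.regions() {
    ///     println!("region {}: guest_addr={} size={:#x}", info.index, info.guest_addr, info.size);
    /// }
    /// # Ok(())
    /// # }
    /// ```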
445     pub fn regions(&self) -> impl Iterator<Item = MemoryRegionInformation> {
446         self.regions
447             .iter()
448             .enumerate()
449             .map(|(index, region)| MemoryRegionInformation {
450                 index,
451                 guest_addr: region.start(),
452                 size: region.mapping.size(),
453                 host_addr: region.mapping.as_ptr() as usize,
454                 shm: &region.shared_obj,
455                 shm_offset: region.obj_offset,
456                 options: region.options,
457             })
458     }
459 
460     /// Writes a slice to guest memory at the specified guest address.
461     /// Returns the number of bytes written.  The number of bytes written can
462     /// be less than the length of the slice if there isn't enough room in the
463     /// memory region.
464     ///
465     /// # Examples
466     /// * Write a slice at guest address 0x1200.
467     ///
468     /// ```
469     /// # use base::MemoryMapping;
470     /// # use vm_memory::{GuestAddress, GuestMemory};
471     /// # fn test_write_u64() -> Result<(), ()> {
472     /// #   let start_addr = GuestAddress(0x1000);
473     /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
474     ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x1200)).map_err(|_| ())?;
475     ///     assert_eq!(5, res);
476     ///     Ok(())
477     /// # }
478     /// ```
479     pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
480         let (mapping, offset, _) = self.find_region(guest_addr)?;
481         mapping
482             .write_slice(buf, offset)
483             .map_err(|e| Error::MemoryAccess(guest_addr, e))
484     }
485 
486     /// Writes the entire contents of a slice to guest memory at the specified
487     /// guest address.
488     ///
489     /// Returns an error if there isn't enough room in the memory region to
490     /// complete the entire write. Part of the data may have been written
491     /// nevertheless.
492     ///
493     /// # Examples
494     ///
495     /// ```
496     /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
497     ///
498     /// fn test_write_all() -> guest_memory::Result<()> {
499     ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
500     ///     let gm = GuestMemory::new(ranges)?;
501     ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
502     /// }
503     /// ```
504     pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
505         let expected = buf.len();
506         let completed = self.write_at_addr(buf, guest_addr)?;
507         if expected == completed {
508             Ok(())
509         } else {
510             Err(Error::ShortWrite {
511                 expected,
512                 completed,
513             })
514         }
515     }
516 
517     /// Reads into a slice from guest memory at the specified guest address.
518     /// Returns the number of bytes read.  The number of bytes read can
519     /// be less than the length of the slice if there isn't enough room in the
520     /// memory region.
521     ///
522     /// # Examples
523     /// * Read a slice of length 16 at guest address 0x1200.
524     ///
525     /// ```
526     /// # use base::MemoryMapping;
527     /// # use vm_memory::{GuestAddress, GuestMemory};
528     /// # fn test_write_u64() -> Result<(), ()> {
529     /// #   let start_addr = GuestAddress(0x1000);
530     /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
531     ///     let buf = &mut [0u8; 16];
532     ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
533     ///     assert_eq!(16, res);
534     ///     Ok(())
535     /// # }
536     /// ```
537     pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
538         let (mapping, offset, _) = self.find_region(guest_addr)?;
539         mapping
540             .read_slice(buf, offset)
541             .map_err(|e| Error::MemoryAccess(guest_addr, e))
542     }
543 
544     /// Reads from guest memory at the specified address to fill the entire
545     /// buffer.
546     ///
547     /// Returns an error if there isn't enough room in the memory region to fill
548     /// the entire buffer. Part of the buffer may have been filled nevertheless.
549     ///
550     /// # Examples
551     ///
552     /// ```
553     /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
554     ///
555     /// fn test_read_exact() -> guest_memory::Result<()> {
556     ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
557     ///     let gm = GuestMemory::new(ranges)?;
558     ///     let mut buffer = [0u8; 0x200];
559     ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
560     /// }
561     /// ```
562     pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
563         let expected = buf.len();
564         let completed = self.read_at_addr(buf, guest_addr)?;
565         if expected == completed {
566             Ok(())
567         } else {
568             Err(Error::ShortRead {
569                 expected,
570                 completed,
571             })
572         }
573     }
574 
575     /// Reads an object from guest memory at the given guest address.
576     ///
577     /// # Examples
578     /// * Read a u64 from two areas of guest memory backed by separate mappings.
579     ///
580     /// ```
581     /// # use vm_memory::{GuestAddress, GuestMemory};
582     /// # fn test_read_u64() -> Result<u64, ()> {
583     /// #     let start_addr1 = GuestAddress(0x0);
584     /// #     let start_addr2 = GuestAddress(0x400);
585     /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
586     /// #         .map_err(|_| ())?;
587     ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
588     ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
589     /// #     Ok(num1 + num2)
590     /// # }
591     /// ```
592     pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
593         let (mapping, offset, _) = self.find_region(guest_addr)?;
594         mapping
595             .read_obj(offset)
596             .map_err(|e| Error::MemoryAccess(guest_addr, e))
597     }
598 
599     /// Reads an object from guest memory at the given guest address.
600     /// Reading from a volatile area isn't strictly safe as it could change
601     /// mid-read.  However, as long as the type T is plain old data and can
602     /// handle random initialization, everything will be OK.
603     ///
604     /// The read operation will be volatile, i.e. it will not be reordered by
605     /// the compiler and is suitable for I/O, but must be aligned. When reading
606     /// from regular memory, prefer [`GuestMemory::read_obj_from_addr`].
607     ///
608     /// # Examples
609     /// * Read a u64 from two areas of guest memory backed by separate mappings.
610     ///
611     /// ```
612     /// # use vm_memory::{GuestAddress, GuestMemory};
613     /// # fn test_read_u64() -> Result<u64, ()> {
614     /// #     let start_addr1 = GuestAddress(0x0);
615     /// #     let start_addr2 = GuestAddress(0x400);
616     /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
617     /// #         .map_err(|_| ())?;
618     ///       let num1: u64 = gm.read_obj_from_addr_volatile(GuestAddress(32)).map_err(|_| ())?;
619     ///       let num2: u64 = gm.read_obj_from_addr_volatile(GuestAddress(0x400+32)).map_err(|_| ())?;
620     /// #     Ok(num1 + num2)
621     /// # }
622     /// ```
623     pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
624         let (mapping, offset, _) = self.find_region(guest_addr)?;
625         mapping
626             .read_obj_volatile(offset)
627             .map_err(|e| Error::MemoryAccess(guest_addr, e))
628     }
629 
630     /// Writes an object to the memory region at the specified guest address.
631     /// Returns Ok(()) if the object fits, or Err if it extends past the end.
632     ///
633     /// # Examples
634     /// * Write a u64 at guest address 0x1100.
635     ///
636     /// ```
637     /// # use vm_memory::{GuestAddress, GuestMemory};
638     /// # fn test_write_u64() -> Result<(), ()> {
639     /// #   let start_addr = GuestAddress(0x1000);
640     /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
641     ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
642     ///         .map_err(|_| ())
643     /// # }
644     /// ```
645     pub fn write_obj_at_addr<T: AsBytes>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
646         let (mapping, offset, _) = self.find_region(guest_addr)?;
647         mapping
648             .write_obj(val, offset)
649             .map_err(|e| Error::MemoryAccess(guest_addr, e))
650     }
651 
652     /// Writes an object to the memory region at the specified guest address.
653     /// Returns Ok(()) if the object fits, or Err if it extends past the end.
654     ///
655     /// The write operation will be volatile, i.e. it will not be reordered by
656     /// the compiler and is suitable for I/O, but must be aligned. When writing
657     /// to regular memory, prefer [`GuestMemory::write_obj_at_addr`].
658     /// # Examples
659     /// * Write a u64 at guest address 0x1100.
660     ///
661     /// ```
662     /// # use vm_memory::{GuestAddress, GuestMemory};
663     /// # fn test_write_u64() -> Result<(), ()> {
664     /// #   let start_addr = GuestAddress(0x1000);
665     /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
666     ///     gm.write_obj_at_addr_volatile(55u64, GuestAddress(0x1100))
667     ///         .map_err(|_| ())
668     /// # }
669     /// ```
670     pub fn write_obj_at_addr_volatile<T: AsBytes>(
671         &self,
672         val: T,
673         guest_addr: GuestAddress,
674     ) -> Result<()> {
675         let (mapping, offset, _) = self.find_region(guest_addr)?;
676         mapping
677             .write_obj_volatile(val, offset)
678             .map_err(|e| Error::MemoryAccess(guest_addr, e))
679     }
680 
681     /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
682     /// is not a subset of this `GuestMemory`.
683     ///
684     /// # Examples
685     /// * Write `99` to 30 bytes starting at guest address 0x1010.
686     ///
687     /// ```
688     /// # use base::MemoryMapping;
689     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
690     /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
691     /// #   let start_addr = GuestAddress(0x1000);
692     /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
693     ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
694     ///     vslice.write_bytes(99);
695     /// #   Ok(())
696     /// # }
697     /// ```
698     pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
699         self.regions
700             .iter()
701             .find(|region| region.contains(addr))
702             .ok_or(Error::InvalidGuestAddress(addr))
703             .and_then(|region| {
704                 // The cast to a usize is safe here because we know that `region.contains(addr)` and
705                 // it's not possible for a memory region to be larger than what fits in a usize.
706                 region
707                     .mapping
708                     .get_slice(addr.offset_from(region.start()) as usize, len)
709                     .map_err(Error::VolatileMemoryAccess)
710             })
711     }
712     /// Convert a GuestAddress into a pointer in the address space of this
713     /// process. This should only be necessary for giving addresses to the
714     /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
715     /// be done through `write_obj_at_addr`, `read_obj_from_addr`, etc.
716     ///
717     /// # Arguments
718     /// * `guest_addr` - Guest address to convert.
719     ///
720     /// # Examples
721     ///
722     /// ```
723     /// # use vm_memory::{GuestAddress, GuestMemory};
724     /// # fn test_host_addr() -> Result<(), ()> {
725     ///     let start_addr = GuestAddress(0x1000);
726     ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
727     ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
728     ///     println!("Host address is {:p}", addr);
729     ///     Ok(())
730     /// # }
731     /// ```
732     pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
733         let (mapping, offset, _) = self.find_region(guest_addr)?;
734         Ok(
735             // SAFETY:
736             // This is safe; `find_region` already checks that offset is in
737             // bounds.
738             unsafe { mapping.as_ptr().add(offset) } as *const u8,
739         )
740     }
741 
742     /// Convert a GuestAddress into a pointer in the address space of this
743     /// process, and verifies that the provided size defines a valid range within
744     /// a single memory region. Similar to get_host_address(), this should only
745     /// be used for giving addresses to the kernel.
746     ///
747     /// # Arguments
748     /// * `guest_addr` - Guest address to convert.
749     /// * `size` - Size of the address range to be converted.
750     ///
751     /// # Examples
752     ///
753     /// ```
754     /// # use vm_memory::{GuestAddress, GuestMemory};
755     /// # fn test_host_addr() -> Result<(), ()> {
756     ///     let start_addr = GuestAddress(0x1000);
757     ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
758     ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
759     ///     println!("Host address is {:p}", addr);
760     ///     Ok(())
761     /// # }
762     /// ```
763     pub fn get_host_address_range(
764         &self,
765         guest_addr: GuestAddress,
766         size: usize,
767     ) -> Result<*const u8> {
768         if size == 0 {
769             return Err(Error::InvalidSize(size));
770         }
771 
772         // Assume no overlap among regions
773         let (mapping, offset, _) = self.find_region(guest_addr)?;
774 
775         if mapping
776             .size()
777             .checked_sub(offset)
778             .map_or(true, |v| v < size)
779         {
780             return Err(Error::InvalidGuestAddress(guest_addr));
781         }
782 
783         Ok(
784             //SAFETY:
785             // This is safe; `find_region` already checks that offset is in
786             // bounds.
787             unsafe { mapping.as_ptr().add(offset) } as *const u8,
788         )
789     }
790 
791     /// Returns a reference to the region that backs the given address.
792     pub fn shm_region(
793         &self,
794         guest_addr: GuestAddress,
795     ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
796         self.regions
797             .iter()
798             .find(|region| region.contains(guest_addr))
799             .ok_or(Error::InvalidGuestAddress(guest_addr))
800             .map(|region| region.shared_obj.as_ref())
801     }
802 
803     /// Returns the region that contains the memory at `offset` from the base of guest memory.
804     pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
805         self.shm_region(
806             self.checked_offset(self.regions[0].guest_base, offset)
807                 .ok_or(Error::InvalidOffset(offset))?,
808         )
809     }
810 
811     /// Loops over all guest memory regions of `self`, and returns the
812     /// target region that contains `guest_addr`. On success, this
813     /// function returns a tuple with the following fields:
814     ///
815     /// (i) the memory mapping associated with the target region.
816     /// (ii) the relative offset from the start of the target region to `guest_addr`.
817     /// (iii) the absolute offset from the start of the memory mapping to the target region.
818     ///
819     /// If no target region is found, an error is returned.
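    ///
    /// A short example (the region spans `[0x1000, 0x2000)` and is the first region, so its
    /// offset into the backing object is 0):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// let (_mapping, offset, obj_offset) = gm.find_region(GuestAddress(0x1200)).map_err(|_| ())?;
    /// assert_eq!(offset, 0x200);
    /// assert_eq!(obj_offset, 0);
    /// # Ok(())
    /// # }
    /// ```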
820     pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
821         self.regions
822             .iter()
823             .find(|region| region.contains(guest_addr))
824             .ok_or(Error::InvalidGuestAddress(guest_addr))
825             .map(|region| {
826                 (
827                     &region.mapping,
828                     guest_addr.offset_from(region.start()) as usize,
829                     region.obj_offset,
830                 )
831             })
832     }
833 
834     /// Convert a GuestAddress into an offset within the associated shm region.
835     ///
836     /// Due to potential gaps within GuestMemory, it is helpful to know the
837     /// offset within the shm where a given address is found. This offset
838     /// can then be passed to another process mapping the shm to read data
839     /// starting at that address.
840     ///
841     /// # Arguments
842     /// * `guest_addr` - Guest address to convert.
843     ///
844     /// # Examples
845     ///
846     /// ```
847     /// # use vm_memory::{GuestAddress, GuestMemory};
848     /// let addr_a = GuestAddress(0x10000);
849     /// let addr_b = GuestAddress(0x80000);
850     /// let mut gm = GuestMemory::new(&vec![
851     ///     (addr_a, 0x20000),
852     ///     (addr_b, 0x30000)]).expect("failed to create GuestMemory");
853     /// let offset = gm.offset_from_base(GuestAddress(0x95000))
854     ///                .expect("failed to get offset");
855     /// assert_eq!(offset, 0x35000);
856     /// ```
857     pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
858         self.regions
859             .iter()
860             .find(|region| region.contains(guest_addr))
861             .ok_or(Error::InvalidGuestAddress(guest_addr))
862             .map(|region| region.obj_offset + guest_addr.offset_from(region.start()))
863     }
864 
865     /// Copy all guest memory into `w`.
866     ///
867     /// # Safety
868     /// Must have exclusive access to the guest memory for the duration of the
869     /// call (e.g. all vCPUs and devices must be stopped).
870     ///
871     /// Returns a JSON object that contains metadata about the underlying memory regions to allow
872     /// validation checks at restore time.
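    ///
    /// A sketch of the intended call pattern; the snapshot file path is illustrative:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn sketch() -> Result<(), Box<dyn std::error::Error>> {
    /// let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x10000)])?;
    /// let mut w = std::fs::File::create("guest_memory.snapshot")?;
    /// // SAFETY: the caller must ensure that all vCPUs and devices are stopped.
    /// let _metadata = unsafe { gm.snapshot(&mut w, /* compress= */ false) }?;
    /// # Ok(())
    /// # }
    /// ```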
873     #[deny(unsafe_op_in_unsafe_fn)]
874     pub unsafe fn snapshot<T: Write>(
875         &self,
876         w: &mut T,
877         compress: bool,
878     ) -> anyhow::Result<serde_json::Value> {
879         fn go(
880             this: &GuestMemory,
881             w: &mut impl Write,
882         ) -> anyhow::Result<Vec<MemoryRegionSnapshotMetadata>> {
883             let mut regions = Vec::new();
884             for region in this.regions.iter() {
885                 let data_ranges = region
886                     .find_data_ranges()
887                     .context("find_data_ranges failed")?;
888                 for range in &data_ranges {
889                     let region_vslice = region
890                         .mapping
891                         .get_slice(range.start, range.end - range.start)?;
892                     // SAFETY:
893                     // 1. The data is guaranteed to be present & of expected length by the
894                     //    `VolatileSlice`.
895                     // 2. Aliasing the `VolatileSlice`'s memory is safe because a. The only mutable
896                     //    reference to it is held by the guest, and the guest's VCPUs are stopped
897                     //    (guaranteed by caller), so that mutable reference can be ignored (aliasing
898                     //    is only an issue if temporal overlap occurs, and it does not here). b.
899                     //    Some host code does manipulate guest memory through raw pointers. This
900                     //    aliases the underlying memory of the slice, so we must ensure that host
901                     //    code is not running (the caller guarantees this).
902                     w.write_all(unsafe {
903                         std::slice::from_raw_parts(region_vslice.as_ptr(), region_vslice.size())
904                     })?;
905                 }
906                 regions.push(MemoryRegionSnapshotMetadata {
907                     guest_base: region.guest_base.0,
908                     size: region.mapping.size(),
909                     data_ranges,
910                 });
911             }
912             Ok(regions)
913         }
914 
915         let regions = if compress {
916             let mut w = lz4_flex::frame::FrameEncoder::new(w);
917             let regions = go(self, &mut w)?;
918             w.finish()?;
919             regions
920         } else {
921             go(self, w)?
922         };
923 
924         Ok(serde_json::to_value(MemorySnapshotMetadata {
925             regions,
926             compressed: compress,
927         })?)
928     }
929 
930     /// Restore the guest memory using the bytes from `r`.
931     ///
932     /// # Safety
933     /// Must have exclusive access to the guest memory for the duration of the
934     /// call (e.g. all vCPUs and devices must be stopped).
935     ///
936     /// Returns an error if `metadata` doesn't match the configuration of the `GuestMemory` or if
937     /// `r` doesn't produce exactly as many bytes as needed.
938     #[deny(unsafe_op_in_unsafe_fn)]
939     pub unsafe fn restore<T: Read>(
940         &self,
941         metadata: serde_json::Value,
942         r: &mut T,
943     ) -> anyhow::Result<()> {
944         let metadata: MemorySnapshotMetadata = serde_json::from_value(metadata)?;
945 
946         let mut r: Box<dyn Read> = if metadata.compressed {
947             Box::new(lz4_flex::frame::FrameDecoder::new(r))
948         } else {
949             Box::new(r)
950         };
951 
952         if self.regions.len() != metadata.regions.len() {
953             bail!(
954                 "snapshot expected {} memory regions but VM has {}",
955                 metadata.regions.len(),
956                 self.regions.len()
957             );
958         }
959         for (region, metadata) in self.regions.iter().zip(metadata.regions.iter()) {
960             let MemoryRegionSnapshotMetadata {
961                 guest_base,
962                 size,
963                 data_ranges,
964             } = metadata;
965             if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
966                 bail!("snapshot memory regions don't match VM memory regions");
967             }
968 
969             let mut prev_end = 0;
970             for range in data_ranges {
971                 let hole_size = range
972                     .start
973                     .checked_sub(prev_end)
974                     .context("invalid data range")?;
975                 if hole_size > 0 {
976                     region.zero_range(prev_end, hole_size)?;
977                 }
978                 let region_vslice = region
979                     .mapping
980                     .get_slice(range.start, range.end - range.start)?;
981 
982                 // SAFETY:
983                 // See `Self::snapshot` for the detailed safety statement, and
984                 // note that both mutable and non-mutable aliasing is safe.
985                 r.read_exact(unsafe {
986                     std::slice::from_raw_parts_mut(region_vslice.as_mut_ptr(), region_vslice.size())
987                 })?;
988 
989                 prev_end = range.end;
990             }
991             let hole_size = region
992                 .mapping
993                 .size()
994                 .checked_sub(prev_end)
995                 .context("invalid data range")?;
996             if hole_size > 0 {
997                 region.zero_range(prev_end, hole_size)?;
998             }
999         }
1000 
1001         // Should always be at EOF at this point.
1002         let mut buf = [0];
1003         if r.read(&mut buf)? != 0 {
1004             bail!("too many bytes");
1005         }
1006 
1007         Ok(())
1008     }
1009 }
1010 
1011 #[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
1012 struct MemorySnapshotMetadata {
1013     regions: Vec<MemoryRegionSnapshotMetadata>,
1014     compressed: bool,
1015 }
1016 
1017 #[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
1018 struct MemoryRegionSnapshotMetadata {
1019     guest_base: u64,
1020     size: usize,
1021     // Ranges of the mmap that are stored in the snapshot file. All other ranges of the region are
1022     // zeros.
1023     data_ranges: Vec<std::ops::Range<usize>>,
1024 }
1025 
1026 // SAFETY:
1027 // It is safe to implement BackingMemory because GuestMemory can be mutated any time already.
1028 unsafe impl BackingMemory for GuestMemory {
1029     fn get_volatile_slice(
1030         &self,
1031         mem_range: cros_async::MemRegion,
1032     ) -> mem::Result<VolatileSlice<'_>> {
1033         self.get_slice_at_addr(GuestAddress(mem_range.offset), mem_range.len)
1034             .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
1035     }
1036 }
1037 
1038 #[cfg(test)]
1039 mod tests {
1040     use super::*;
1041 
1042     #[test]
1043     fn test_alignment() {
1044         let start_addr1 = GuestAddress(0x0);
1045         let start_addr2 = GuestAddress(0x10000);
1046 
1047         assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
1048         assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
1049     }
1050 
1051     #[test]
1052     fn two_regions() {
1053         let start_addr1 = GuestAddress(0x0);
1054         let start_addr2 = GuestAddress(0x10000);
1055         // The memory regions are `[0x0, 0x10000)`, `[0x10000, 0x20000)`.
1056         let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();
1057 
1058         // Although each address in `[0x0, 0x20000)` is valid, `is_valid_range()` returns false for
1059         // a range that is across multiple underlying regions.
1060         assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
1061         assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
1062         assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
1063     }
1064 
1065     #[test]
1066     fn overlap_memory() {
1067         let start_addr1 = GuestAddress(0x0);
1068         let start_addr2 = GuestAddress(0x10000);
1069         assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
1070     }
1071 
1072     #[test]
1073     fn region_hole() {
1074         let start_addr1 = GuestAddress(0x0);
1075         let start_addr2 = GuestAddress(0x40000);
1076         // The memory regions are `[0x0, 0x20000)`, `[0x40000, 0x60000)`.
1077         let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();
1078 
1079         assert!(gm.address_in_range(GuestAddress(0x10000)));
1080         assert!(!gm.address_in_range(GuestAddress(0x30000)));
1081         assert!(gm.address_in_range(GuestAddress(0x50000)));
1082         assert!(!gm.address_in_range(GuestAddress(0x60000)));
1083         assert!(!gm.address_in_range(GuestAddress(0x60000)));
1084         assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)),);
1085         assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)),);
1086         assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)),);
1087         assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
1088         assert_eq!(
1089             gm.checked_offset(GuestAddress(0x50000), 0x8000),
1090             Some(GuestAddress(0x58000))
1091         );
1092         assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
1093         assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
1094         assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
1095         assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));
1096 
1097         // While `checked_offset(GuestAddress(0x10000), 0x40000)` succeeds because 0x50000 is a
1098         // valid address, `is_valid_range(GuestAddress(0x10000), 0x40000)` returns `false`
1099         // because there is a hole inside of [0x10000, 0x50000).
1100         assert_eq!(
1101             gm.checked_offset(GuestAddress(0x10000), 0x40000),
1102             Some(GuestAddress(0x50000))
1103         );
1104         assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
1105     }
1106 
1107     #[test]
1108     fn test_read_u64() {
1109         let start_addr1 = GuestAddress(0x0);
1110         let start_addr2 = GuestAddress(0x10000);
1111         let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();
1112 
1113         let val1: u64 = 0xaa55aa55aa55aa55;
1114         let val2: u64 = 0x55aa55aa55aa55aa;
1115         gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
1116         gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
1117             .unwrap();
1118         let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
1119         let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
1120         assert_eq!(val1, num1);
1121         assert_eq!(val2, num2);
1122     }
1123 
1124     #[test]
1125     fn test_memory_size() {
1126         let start_region1 = GuestAddress(0x0);
1127         let size_region1 = 0x10000;
1128         let start_region2 = GuestAddress(0x10000);
1129         let size_region2 = 0x20000;
1130         let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
1131             .unwrap();
1132 
1133         let mem_size = gm.memory_size();
1134         assert_eq!(mem_size, size_region1 + size_region2);
1135     }
1136 
1137     // Get the base address of the mapping for a GuestAddress.
1138     fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
1139         Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
1140     }
1141 
1142     #[test]
1143     fn guest_to_host() {
1144         let start_addr1 = GuestAddress(0x0);
1145         let start_addr2 = GuestAddress(0x10000);
1146         let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();
1147 
1148         // Verify the host addresses match what we expect from the mappings.
1149         let addr1_base = get_mapping(&mem, start_addr1).unwrap();
1150         let addr2_base = get_mapping(&mem, start_addr2).unwrap();
1151         let host_addr1 = mem.get_host_address(start_addr1).unwrap();
1152         let host_addr2 = mem.get_host_address(start_addr2).unwrap();
1153         assert_eq!(host_addr1, addr1_base);
1154         assert_eq!(host_addr2, addr2_base);
1155 
1156         // Check that a bad address returns an error.
1157         let bad_addr = GuestAddress(0x123456);
1158         assert!(mem.get_host_address(bad_addr).is_err());
1159     }
1160 
1161     #[test]
1162     fn guest_to_host_range() {
1163         let start_addr1 = GuestAddress(0x0);
1164         let start_addr2 = GuestAddress(0x10000);
1165         let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();
1166 
1167         // Verify the host addresses match what we expect from the mappings.
1168         let addr1_base = get_mapping(&mem, start_addr1).unwrap();
1169         let addr2_base = get_mapping(&mem, start_addr2).unwrap();
1170         let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
1171         let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
1172         assert_eq!(host_addr1, addr1_base);
1173         assert_eq!(host_addr2, addr2_base);
1174 
1175         let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
1176         assert_eq!(host_addr3, addr2_base);
1177 
1178         // Check that a valid guest address with an invalid size returns an error.
1179         assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());
1180 
1181         // Check that a bad address returns an error.
1182         let bad_addr = GuestAddress(0x123456);
1183         assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
1184     }
1185 
1186     #[test]
1187     fn shm_offset() {
1188         let start_region1 = GuestAddress(0x0);
1189         let size_region1 = 0x10000;
1190         let start_region2 = GuestAddress(0x10000);
1191         let size_region2 = 0x20000;
1192         let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
1193             .unwrap();
1194 
1195         gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
1196         gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
1197             .unwrap();
1198 
1199         for region in gm.regions() {
1200             let shm = match region.shm {
1201                 BackingObject::Shm(s) => s,
1202                 _ => {
1203                     panic!("backing object isn't SharedMemory");
1204                 }
1205             };
1206             let mmap = MemoryMappingBuilder::new(region.size)
1207                 .from_shared_memory(shm)
1208                 .offset(region.shm_offset)
1209                 .build()
1210                 .unwrap();
1211 
1212             if region.index == 0 {
1213                 assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
1214             }
1215 
1216             if region.index == 1 {
1217                 assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
1218             }
1219         }
1220     }
1221 
1222     #[test]
1223     // Disabled for non-x86 because test infra uses qemu-user, which doesn't support MADV_REMOVE.
1224     #[cfg(target_arch = "x86_64")]
1225     fn snapshot_restore() {
1226         let regions = &[
1227             // Hole at start.
1228             (GuestAddress(0x0), 0x10000),
1229             // Hole at end.
1230             (GuestAddress(0x10000), 0x10000),
1231             // Hole in middle.
1232             (GuestAddress(0x20000), 0x10000),
1233             // All holes.
1234             (GuestAddress(0x30000), 0x10000),
1235             // No holes.
1236             (GuestAddress(0x40000), 0x1000),
1237         ];
1238         let writes = &[
1239             (GuestAddress(0x0FFF0), 1u64),
1240             (GuestAddress(0x10000), 2u64),
1241             (GuestAddress(0x29000), 3u64),
1242             (GuestAddress(0x40000), 4u64),
1243         ];
1244 
1245         let gm = GuestMemory::new(regions).unwrap();
1246         for &(addr, value) in writes {
1247             gm.write_obj_at_addr(value, addr).unwrap();
1248         }
1249 
1250         let mut data = tempfile::tempfile().unwrap();
1251         // SAFETY:
1252         // no vm is running
1253         let metadata_json = unsafe { gm.snapshot(&mut data, false).unwrap() };
1254         let metadata: MemorySnapshotMetadata =
1255             serde_json::from_value(metadata_json.clone()).unwrap();
1256 
1257         #[cfg(unix)]
1258         assert_eq!(
1259             metadata,
1260             MemorySnapshotMetadata {
1261                 regions: vec![
1262                     MemoryRegionSnapshotMetadata {
1263                         guest_base: 0,
1264                         size: 0x10000,
1265                         data_ranges: vec![0x0F000..0x10000],
1266                     },
1267                     MemoryRegionSnapshotMetadata {
1268                         guest_base: 0x10000,
1269                         size: 0x10000,
1270                         data_ranges: vec![0x00000..0x01000],
1271                     },
1272                     MemoryRegionSnapshotMetadata {
1273                         guest_base: 0x20000,
1274                         size: 0x10000,
1275                         data_ranges: vec![0x09000..0x0A000],
1276                     },
1277                     MemoryRegionSnapshotMetadata {
1278                         guest_base: 0x30000,
1279                         size: 0x10000,
1280                         data_ranges: vec![],
1281                     },
1282                     MemoryRegionSnapshotMetadata {
1283                         guest_base: 0x40000,
1284                         size: 0x1000,
1285                         data_ranges: vec![0x00000..0x01000],
1286                     }
1287                 ],
1288                 compressed: false,
1289             }
1290         );
1291         // We can't detect the holes on Windows yet.
1292         #[cfg(windows)]
1293         assert_eq!(
1294             metadata,
1295             MemorySnapshotMetadata {
1296                 regions: vec![
1297                     MemoryRegionSnapshotMetadata {
1298                         guest_base: 0,
1299                         size: 0x10000,
1300                         data_ranges: vec![0x00000..0x10000],
1301                     },
1302                     MemoryRegionSnapshotMetadata {
1303                         guest_base: 0x10000,
1304                         size: 0x10000,
1305                         data_ranges: vec![0x00000..0x10000],
1306                     },
1307                     MemoryRegionSnapshotMetadata {
1308                         guest_base: 0x20000,
1309                         size: 0x10000,
1310                         data_ranges: vec![0x00000..0x10000],
1311                     },
1312                     MemoryRegionSnapshotMetadata {
1313                         guest_base: 0x30000,
1314                         size: 0x10000,
1315                         data_ranges: vec![0x00000..0x10000],
1316                     },
1317                     MemoryRegionSnapshotMetadata {
1318                         guest_base: 0x40000,
1319                         size: 0x1000,
1320                         data_ranges: vec![0x00000..0x01000],
1321                     }
1322                 ],
1323                 compressed: false,
1324             }
1325         );
1326 
1327         std::mem::drop(gm);
1328 
1329         let gm2 = GuestMemory::new(regions).unwrap();
1330 
1331         // Write to a hole so we can assert the restore zeroes it.
1332         let hole_addr = GuestAddress(0x30000);
1333         gm2.write_obj_at_addr(8u64, hole_addr).unwrap();
1334 
1335         use std::io::Seek;
1336         data.seek(std::io::SeekFrom::Start(0)).unwrap();
1337         // SAFETY:
1338         // no vm is running
1339         unsafe { gm2.restore(metadata_json, &mut data).unwrap() };
1340 
1341         assert_eq!(gm2.read_obj_from_addr::<u64>(hole_addr).unwrap(), 0);
1342         for &(addr, value) in writes {
1343             assert_eq!(gm2.read_obj_from_addr::<u64>(addr).unwrap(), value);
1344         }
1345     }
1346 }
1347