// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::convert::AsRef;
use std::convert::TryFrom;
use std::fs::File;
use std::io::Read;
use std::io::Write;
use std::marker::Send;
use std::marker::Sync;
use std::result;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Error as SysError;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use base::SharedMemory;
use base::VolatileMemory;
use base::VolatileMemoryError;
use base::VolatileSlice;
use cros_async::mem;
use cros_async::BackingMemory;
use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use serde_keyvalue::FromKeyValues;
use snapshot::AnySnapshot;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;

use crate::guest_address::GuestAddress;

mod sys;
pub use sys::MemoryPolicy;
#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("failed to map guest memory to file: {0}")]
    FiledBackedMemoryMappingFailed(#[source] MmapError),
    #[error("failed to open file for file backed mapping: {0}")]
    FiledBackedOpenFailed(#[source] std::io::Error),
    #[error("invalid guest address {0}")]
    InvalidGuestAddress(GuestAddress),
    #[error("invalid offset {0}")]
    InvalidOffset(u64),
    #[error("size {0} must not be zero")]
    InvalidSize(usize),
    #[error("invalid guest memory access at addr={0}: {1}")]
    MemoryAccess(GuestAddress, #[source] MmapError),
    #[error("failed to set seals on shm region: {0}")]
    MemoryAddSealsFailed(#[source] SysError),
    #[error("failed to create shm region: {0}")]
    MemoryCreationFailed(#[source] SysError),
    #[error("failed to map guest memory: {0}")]
    MemoryMappingFailed(#[source] MmapError),
    #[error("guest memory region {0}+{1:#x} is not page aligned")]
    MemoryNotAligned(GuestAddress, u64),
    #[error("memory regions overlap")]
    MemoryRegionOverlap,
    #[error("memory region size {0} is too large")]
    MemoryRegionTooLarge(u128),
    #[error("incomplete read of {completed} instead of {expected} bytes")]
    ShortRead { expected: usize, completed: usize },
    #[error("incomplete write of {completed} instead of {expected} bytes")]
    ShortWrite { expected: usize, completed: usize },
    #[error("DescriptorChain split is out of bounds: {0}")]
    SplitOutOfBounds(usize),
    #[error("{0}")]
    VolatileMemoryAccess(#[source] VolatileMemoryError),
}

pub type Result<T> = result::Result<T, Error>;

/// A file-like object backing `MemoryRegion`.
#[derive(Clone, Debug)]
pub enum BackingObject {
    Shm(Arc<SharedMemory>),
    File(Arc<File>),
}

impl AsRawDescriptor for BackingObject {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        match self {
            Self::Shm(shm) => shm.as_raw_descriptor(),
            Self::File(f) => f.as_raw_descriptor(),
        }
    }
}

impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
        match self {
            BackingObject::Shm(shm) => shm.as_ref(),
            BackingObject::File(f) => f.as_ref(),
        }
    }
}

/// Information about a memory region, as yielded by `GuestMemory::regions()`.
pub struct MemoryRegionInformation<'a> {
    pub index: usize,
    pub guest_addr: GuestAddress,
    pub size: usize,
    pub host_addr: usize,
    pub shm: &'a BackingObject,
    pub shm_offset: u64,
    pub options: MemoryRegionOptions,
}

#[sorted]
#[derive(Clone, Copy, Debug, Default, PartialOrd, PartialEq, Eq, Ord)]
pub enum MemoryRegionPurpose {
    /// BIOS/firmware ROM
    Bios,

    /// General purpose guest memory
    #[default]
    GuestMemoryRegion,

    /// PVMFW
    ProtectedFirmwareRegion,

    /// An area that should be backed by a GuestMemory region but reported as reserved to the
    /// guest.
    ReservedMemory,

    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    StaticSwiotlbRegion,
}

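/// Parameters for a file-backed guest memory mapping, parsed from `key=value`
/// command-line syntax via `serde_keyvalue`.
///
/// A minimal parsing sketch (not compiled here; the path and numeric values
/// are illustrative, and `addr`/`rw` are the serde renames of `address` and
/// `writable` declared below):
///
/// ```ignore
/// let params: FileBackedMappingParameters = serde_keyvalue::from_key_values(
///     "path=/tmp/backing.img,addr=0x80000000,size=0x100000,rw",
/// )
/// .unwrap();
/// assert!(params.writable);
/// ```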
#[derive(Clone, Debug, Serialize, Deserialize, FromKeyValues, PartialEq, Eq, PartialOrd, Ord)]
#[serde(deny_unknown_fields)]
pub struct FileBackedMappingParameters {
    pub path: std::path::PathBuf,
    #[serde(rename = "addr")]
    pub address: u64,
    pub size: u64,
    #[serde(default)]
    pub offset: u64,
    #[serde(rename = "rw", default)]
    pub writable: bool,
    #[serde(default)]
    pub sync: bool,
    #[serde(default)]
    pub align: bool,
    /// Whether the mapping is for RAM or MMIO.
    #[serde(default)]
    pub ram: bool,
}

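/// Options for creating a guest memory region, built up in builder style.
///
/// A minimal sketch of the builder usage (the purpose and alignment values
/// are illustrative):
///
/// ```
/// use vm_memory::{MemoryRegionOptions, MemoryRegionPurpose};
///
/// let opts = MemoryRegionOptions::new()
///     .purpose(MemoryRegionPurpose::ReservedMemory)
///     // 2 MiB block alignment, e.g. for transparent huge pages.
///     .align(0x200000);
/// assert_eq!(opts.align, 0x200000);
/// ```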
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct MemoryRegionOptions {
    /// Some hypervisors (presently: Gunyah) need explicit knowledge about
    /// which memory region is used for protected firmware, static swiotlb,
    /// or general purpose guest memory.
    pub purpose: MemoryRegionPurpose,
    /// Alignment for the mapping of this region. This is intended for arm64
    /// KVM support, where block alignment is required for transparent
    /// huge page support.
    pub align: u64,
    /// Backing file params.
    pub file_backed: Option<FileBackedMappingParameters>,
}

impl MemoryRegionOptions {
    pub fn new() -> MemoryRegionOptions {
        Default::default()
    }

    pub fn purpose(mut self, purpose: MemoryRegionPurpose) -> Self {
        self.purpose = purpose;
        self
    }

    pub fn align(mut self, alignment: u64) -> Self {
        self.align = alignment;
        self
    }

    pub fn file_backed(mut self, params: FileBackedMappingParameters) -> Self {
        self.file_backed = Some(params);
        self
    }
}

/// A region of memory-mapped memory.
/// Holds the memory mapping with its offset in guest memory.
/// Also holds the backing object for the mapping and the offset in that object of the mapping.
#[derive(Debug)]
pub struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,

    shared_obj: BackingObject,
    obj_offset: u64,

    options: MemoryRegionOptions,
}

impl MemoryRegion {
    /// Creates a new MemoryRegion using the given SharedMemory object to later be attached to a VM
    /// at `guest_base` address in the guest.
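    ///
    /// A minimal sketch (the name, size, and guest address are illustrative):
    ///
    /// ```
    /// use std::sync::Arc;
    /// use base::SharedMemory;
    /// use vm_memory::{GuestAddress, MemoryRegion};
    ///
    /// let shm = Arc::new(SharedMemory::new("example", 0x10000).unwrap());
    /// let _region = MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm).unwrap();
    /// ```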
    pub fn new_from_shm(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        shm: Arc<SharedMemory>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_shared_memory(shm.as_ref())
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::Shm(shm),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    /// Creates a new MemoryRegion using the given file, to be made available later at the
    /// `guest_base` address in the guest.
    pub fn new_from_file(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        file: Arc<File>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_file(&file)
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::File(file),
            obj_offset: offset,
            options: Default::default(),
        })
    }

    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }
}

/// Tracks memory regions and where they are mapped in the guest, along with shm
/// descriptors of the underlying memory regions.
#[derive(Clone, Debug)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
    locked: bool,
}

impl AsRawDescriptors for GuestMemory {
    /// USE WITH CAUTION, the descriptors returned here are not necessarily
    /// files!
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shared_obj.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
    /// Creates backing shm for GuestMemory regions
    fn create_shm(ranges: &[(GuestAddress, u64, MemoryRegionOptions)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.2.file_backed.is_some() {
                // Regions with a backing file don't use part of the `SharedMemory`.
                continue;
            }
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned(range.0, range.1));
            }

            aligned_size += range.1;
        }

        // NOTE: Some tests rely on the GuestMemory's name when capturing metrics.
        let name = "crosvm_guest";
        // Shm must be mut even though it is only updated on Unix systems.
        #[allow(unused_mut)]
        let mut shm = SharedMemory::new(name, aligned_size).map_err(Error::MemoryCreationFailed)?;

        sys::finalize_shm(&mut shm)?;

        Ok(shm)
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size, MemoryRegionOptions)
    /// tuples sorted by Address.
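    ///
    /// A minimal sketch (addresses, sizes, and options are illustrative; sizes
    /// must be page aligned):
    ///
    /// ```
    /// use vm_memory::{GuestAddress, GuestMemory, MemoryRegionOptions, MemoryRegionPurpose};
    ///
    /// let gm = GuestMemory::new_with_options(&[
    ///     (GuestAddress(0x0), 0x10000, MemoryRegionOptions::new()),
    ///     (
    ///         GuestAddress(0x10000),
    ///         0x10000,
    ///         MemoryRegionOptions::new().purpose(MemoryRegionPurpose::ReservedMemory),
    ///     ),
    /// ])
    /// .unwrap();
    /// assert_eq!(gm.memory_size(), 0x20000);
    /// ```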
    pub fn new_with_options(
        ranges: &[(GuestAddress, u64, MemoryRegionOptions)],
    ) -> Result<GuestMemory> {
        // Create shm
        let shm = Arc::new(GuestMemory::create_shm(ranges)?);

        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut shm_offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size = usize::try_from(range.1)
                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
            if let Some(file_backed) = &range.2.file_backed {
                assert_eq!(usize::try_from(file_backed.size).unwrap(), size);
                let file = file_backed.open().map_err(Error::FiledBackedOpenFailed)?;
                let mapping = MemoryMappingBuilder::new(size)
                    .from_file(&file)
                    .offset(file_backed.offset)
                    .align(range.2.align)
                    .protection(if file_backed.writable {
                        base::Protection::read_write()
                    } else {
                        base::Protection::read()
                    })
                    .build()
                    .map_err(Error::FiledBackedMemoryMappingFailed)?;
                regions.push(MemoryRegion {
                    mapping,
                    guest_base: range.0,
                    shared_obj: BackingObject::File(Arc::new(file)),
                    obj_offset: file_backed.offset,
                    options: range.2.clone(),
                });
            } else {
                let mapping = MemoryMappingBuilder::new(size)
                    .from_shared_memory(shm.as_ref())
                    .offset(shm_offset)
                    .align(range.2.align)
                    .build()
                    .map_err(Error::MemoryMappingFailed)?;
                regions.push(MemoryRegion {
                    mapping,
                    guest_base: range.0,
                    shared_obj: BackingObject::Shm(shm.clone()),
                    obj_offset: shm_offset,
                    options: range.2.clone(),
                });
                shm_offset += size as u64;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
            locked: false,
        })
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        GuestMemory::new_with_options(
            ranges
                .iter()
                .map(|(addr, size)| (*addr, *size, Default::default()))
                .collect::<Vec<(GuestAddress, u64, MemoryRegionOptions)>>()
                .as_slice(),
        )
    }

    /// Creates a `GuestMemory` from a collection of MemoryRegions.
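    ///
    /// A minimal sketch assembling two regions that share one `SharedMemory`
    /// (names and sizes are illustrative):
    ///
    /// ```
    /// use std::sync::Arc;
    /// use base::SharedMemory;
    /// use vm_memory::{GuestAddress, GuestMemory, MemoryRegion};
    ///
    /// let shm = Arc::new(SharedMemory::new("example", 0x20000).unwrap());
    /// let regions = vec![
    ///     MemoryRegion::new_from_shm(0x10000, GuestAddress(0x0), 0, shm.clone()).unwrap(),
    ///     MemoryRegion::new_from_shm(0x10000, GuestAddress(0x10000), 0x10000, shm).unwrap(),
    /// ];
    /// let gm = GuestMemory::from_regions(regions).unwrap();
    /// assert_eq!(gm.memory_size(), 0x20000);
    /// ```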
    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
        // Sort the regions and ensure they do not overlap.
        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));

        if regions.len() > 1 {
            let mut prev_end = regions[0]
                .guest_base
                .checked_add(regions[0].mapping.size() as u64)
                .ok_or(Error::MemoryRegionOverlap)?;
            for region in &regions[1..] {
                if prev_end > region.guest_base {
                    return Err(Error::MemoryRegionOverlap);
                }
                prev_end = region
                    .guest_base
                    .checked_add(region.mapping.size() as u64)
                    .ok_or(Error::MemoryRegionTooLarge(
                        region.guest_base.0 as u128 + region.mapping.size() as u128,
                    ))?;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
            locked: false,
        })
    }

    /// Whether `MemoryPolicy::LOCK_GUEST_MEMORY` was set.
    pub fn locked(&self) -> bool {
        self.locked
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    /// Returns the guest addresses and sizes of the memory regions.
    pub fn guest_memory_regions(&self) -> Vec<(GuestAddress, usize)> {
        self.regions
            .iter()
            .map(|region| (region.guest_base, region.mapping.size()))
            .collect()
    }

    /// Returns the total size of memory in bytes.
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    /// Returns true if the given range `[start, end)` overlaps with the memory range
    /// available to the guest.
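    ///
    /// For example (the region layout is illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).unwrap();
    /// assert!(gm.range_overlap(GuestAddress(0x8000), GuestAddress(0x12000)));
    /// assert!(!gm.range_overlap(GuestAddress(0x20000), GuestAddress(0x30000)));
    /// ```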
    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    /// Returns an address `addr + offset` if it's in range.
    ///
    /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
    /// guarantee it's a valid range, use `is_valid_range()` instead.
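    ///
    /// For example (the region layout is illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).unwrap();
    /// assert_eq!(
    ///     gm.checked_offset(GuestAddress(0x10000), 0x8000),
    ///     Some(GuestAddress(0x18000))
    /// );
    /// // 0x10000 + 0x10000 is past the end of the region.
    /// assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
    /// ```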
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
    /// range available to the guest and it's backed by a single underlying memory region.
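    ///
    /// A short sketch of the single-region requirement (the layout is
    /// illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[
    ///     (GuestAddress(0x0), 0x10000),
    ///     (GuestAddress(0x10000), 0x10000),
    /// ])
    /// .unwrap();
    /// assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
    /// // Spans two backing regions, so it is not a valid contiguous range.
    /// assert!(!gm.is_valid_range(GuestAddress(0x8000), 0x10000));
    /// ```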
    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
        if length == 0 {
            return false;
        }

        let end = if let Some(end) = start.checked_add(length - 1) {
            end
        } else {
            return false;
        };

        self.regions
            .iter()
            .any(|region| region.start() <= start && end < region.end())
    }

    /// Returns the number of memory regions.
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    /// Returns an iterator over [`MemoryRegionInformation`] for each region, in order of guest
    /// address.
    pub fn regions(&self) -> impl Iterator<Item = MemoryRegionInformation> {
        self.regions
            .iter()
            .enumerate()
            .map(|(index, region)| MemoryRegionInformation {
                index,
                guest_addr: region.start(),
                size: region.mapping.size(),
                host_addr: region.mapping.as_ptr() as usize,
                shm: &region.shared_obj,
                shm_offset: region.obj_offset,
                options: region.options.clone(),
            })
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(5, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads to a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let buf = &mut [0u8; 16];
    ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(16, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_slice(buf, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400 + 32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`GuestMemory::read_obj_from_addr`].
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr_volatile(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr_volatile(GuestAddress(0x400 + 32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr_volatile<T: FromBytes>(&self, guest_addr: GuestAddress) -> Result<T> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .read_obj_volatile(offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: IntoBytes + Immutable>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`GuestMemory::write_obj_at_addr`].
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr_volatile(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr_volatile<T: IntoBytes + Immutable>(
        &self,
        val: T,
        guest_addr: GuestAddress,
    ) -> Result<()> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        mapping
            .write_obj_volatile(val, offset)
            .map_err(|e| Error::MemoryAccess(guest_addr, e))
    }

    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
    /// is not a subset of this `GuestMemory`.
    ///
    /// # Examples
    /// * Write `99` to 30 bytes starting at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
    ///     vslice.write_bytes(99);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                // The cast to a usize is safe here because we know that `region.contains(addr)` and
                // it's not possible for a memory region to be larger than what fits in a usize.
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_obj_at_addr`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        let (mapping, offset, _) = self.find_region(guest_addr)?;
        Ok(
            // SAFETY:
            // This is safe; `find_region` already checks that offset is in
            // bounds.
            unsafe { mapping.as_ptr().add(offset) } as *const u8,
        )
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process, and verify that the provided size defines a valid range within
    /// a single memory region. Similar to get_host_address(), this should only
    /// be used for giving addresses to the kernel.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    /// * `size` - Size of the address range to be converted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address_range(
        &self,
        guest_addr: GuestAddress,
        size: usize,
    ) -> Result<*const u8> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        // Assume no overlap among regions
        let (mapping, offset, _) = self.find_region(guest_addr)?;

        if mapping
            .size()
            .checked_sub(offset)
            .map_or(true, |v| v < size)
        {
            return Err(Error::InvalidGuestAddress(guest_addr));
        }

        Ok(
            // SAFETY:
            // This is safe; `find_region` already checks that offset is in
            // bounds.
            unsafe { mapping.as_ptr().add(offset) } as *const u8,
        )
    }

    /// Returns a reference to the backing object of the region that contains the given address.
    pub fn shm_region(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shared_obj.as_ref())
    }

    /// Returns the backing object of the region that contains the memory at `offset` from the
    /// base of guest memory.
    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

    /// Loops over all guest memory regions of `self`, and returns the
    /// target region that contains `guest_addr`. On success, this
    /// function returns a tuple with the following fields:
    ///
    /// (i) the memory mapping associated with the target region.
    /// (ii) the relative offset from the start of the target region to `guest_addr`.
    /// (iii) the absolute offset from the start of the backing object to the target region.
    ///
    /// If no target region is found, an error is returned.
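    ///
    /// A short sketch of consuming the returned tuple (the region layout is
    /// illustrative; `MappedRegion` is imported for `size()`):
    ///
    /// ```
    /// # use base::MappedRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x10000), 0x10000)]).unwrap();
    /// let (mapping, offset, obj_offset) = gm.find_region(GuestAddress(0x12000)).unwrap();
    /// assert_eq!(offset, 0x2000);
    /// assert_eq!(obj_offset, 0);
    /// assert_eq!(mapping.size(), 0x10000);
    /// ```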
    pub fn find_region(&self, guest_addr: GuestAddress) -> Result<(&MemoryMapping, usize, u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.obj_offset,
                )
            })
    }

    /// Convert a GuestAddress into an offset within the associated shm region.
    ///
    /// A `GuestMemory` may have multiple backing objects and the offset is
    /// only meaningful in relation to the associated backing object, so a
    /// reference to it is included in the return value.
    ///
    /// Due to potential gaps within GuestMemory, it is helpful to know the
    /// offset within the shm where a given address is found. This offset
    /// can then be passed to another process mapping the shm to read data
    /// starting at that address.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let addr_a = GuestAddress(0x10000);
    /// let addr_b = GuestAddress(0x80000);
    /// let mut gm = GuestMemory::new(&vec![
    ///     (addr_a, 0x20000),
    ///     (addr_b, 0x30000)]).expect("failed to create GuestMemory");
    /// let (_backing_object, offset) = gm.offset_from_base(GuestAddress(0x95000))
    ///                .expect("failed to get offset");
    /// assert_eq!(offset, 0x35000);
    /// ```
    pub fn offset_from_base(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<(&(dyn AsRawDescriptor + Send + Sync), u64)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| {
                (
                    region.shared_obj.as_ref(),
                    region.obj_offset + guest_addr.offset_from(region.start()),
                )
            })
    }

    /// Copy all guest memory into `w`.
    ///
    /// # Safety
    /// Must have exclusive access to the guest memory for the duration of the
    /// call (e.g. all vCPUs and devices must be stopped).
    ///
    /// Returns a JSON object that contains metadata about the underlying memory regions to allow
    /// validation checks at restore time.
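    ///
    /// A minimal usage sketch (`ignore`d because the exclusive-access
    /// requirement cannot be checked in a doctest; `gm` is assumed to be a
    /// `GuestMemory`):
    ///
    /// ```ignore
    /// let mut file = std::fs::File::create("mem.snap")?;
    /// // SAFETY: all vCPUs and devices are stopped.
    /// let metadata = unsafe { gm.snapshot(&mut file, /* compress= */ true)? };
    /// ```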
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn snapshot<T: Write>(
        &self,
        w: &mut T,
        compress: bool,
    ) -> anyhow::Result<AnySnapshot> {
        fn go(
            this: &GuestMemory,
            w: &mut impl Write,
        ) -> anyhow::Result<Vec<MemoryRegionSnapshotMetadata>> {
            let mut regions = Vec::new();
            for region in this.regions.iter() {
                let data_ranges = region
                    .find_data_ranges()
                    .context("find_data_ranges failed")?;
                for range in &data_ranges {
                    let region_vslice = region
                        .mapping
                        .get_slice(range.start, range.end - range.start)?;
                    // SAFETY:
                    // 1. The data is guaranteed to be present & of expected length by the
                    //    `VolatileSlice`.
                    // 2. Aliasing the `VolatileSlice`'s memory is safe because a. The only mutable
                    //    reference to it is held by the guest, and the guest's VCPUs are stopped
                    //    (guaranteed by caller), so that mutable reference can be ignored (aliasing
                    //    is only an issue if temporal overlap occurs, and it does not here). b.
                    //    Some host code does manipulate guest memory through raw pointers. This
                    //    aliases the underlying memory of the slice, so we must ensure that host
                    //    code is not running (the caller guarantees this).
                    w.write_all(unsafe {
                        std::slice::from_raw_parts(region_vslice.as_ptr(), region_vslice.size())
                    })?;
                }
                regions.push(MemoryRegionSnapshotMetadata {
                    guest_base: region.guest_base.0,
                    size: region.mapping.size(),
                    data_ranges,
                });
            }
            Ok(regions)
        }

        let regions = if compress {
            let mut w = lz4_flex::frame::FrameEncoder::new(w);
            let regions = go(self, &mut w)?;
            w.finish()?;
            regions
        } else {
            go(self, w)?
        };

        AnySnapshot::to_any(MemorySnapshotMetadata {
            regions,
            compressed: compress,
        })
    }

    /// Restore the guest memory using the bytes from `r`.
    ///
    /// # Safety
    /// Must have exclusive access to the guest memory for the duration of the
    /// call (e.g. all vCPUs and devices must be stopped).
    ///
    /// Returns an error if `metadata` doesn't match the configuration of the `GuestMemory` or if
    /// `r` doesn't produce exactly as many bytes as needed.
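    ///
    /// A minimal usage sketch mirroring [`GuestMemory::snapshot`] (`ignore`d
    /// for the same reason; `gm` and `metadata` are assumed to exist):
    ///
    /// ```ignore
    /// let mut file = std::fs::File::open("mem.snap")?;
    /// // SAFETY: all vCPUs and devices are stopped.
    /// unsafe { gm.restore(metadata, &mut file)? };
    /// ```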
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn restore<T: Read>(&self, metadata: AnySnapshot, r: &mut T) -> anyhow::Result<()> {
        let metadata: MemorySnapshotMetadata = AnySnapshot::from_any(metadata)?;

        let mut r: Box<dyn Read> = if metadata.compressed {
            Box::new(lz4_flex::frame::FrameDecoder::new(r))
        } else {
            Box::new(r)
        };

        if self.regions.len() != metadata.regions.len() {
            bail!(
                "snapshot expected {} memory regions but VM has {}",
                metadata.regions.len(),
                self.regions.len()
            );
        }
        for (region, metadata) in self.regions.iter().zip(metadata.regions.iter()) {
            let MemoryRegionSnapshotMetadata {
                guest_base,
                size,
                data_ranges,
            } = metadata;
            if region.guest_base.0 != *guest_base || region.mapping.size() != *size {
                bail!("snapshot memory regions don't match VM memory regions");
            }

            let mut prev_end = 0;
            for range in data_ranges {
                let hole_size = range
                    .start
                    .checked_sub(prev_end)
                    .context("invalid data range")?;
                if hole_size > 0 {
                    region.zero_range(prev_end, hole_size)?;
                }
                let region_vslice = region
                    .mapping
                    .get_slice(range.start, range.end - range.start)?;

                // SAFETY:
                // See `Self::snapshot` for the detailed safety statement, and
                // note that both mutable and non-mutable aliasing is safe.
                r.read_exact(unsafe {
                    std::slice::from_raw_parts_mut(region_vslice.as_mut_ptr(), region_vslice.size())
                })?;

                prev_end = range.end;
            }
            let hole_size = region
                .mapping
                .size()
                .checked_sub(prev_end)
                .context("invalid data range")?;
            if hole_size > 0 {
                region.zero_range(prev_end, hole_size)?;
            }
        }

        // Should always be at EOF at this point.
        let mut buf = [0];
        if r.read(&mut buf)? != 0 {
            bail!("too many bytes");
        }

        Ok(())
    }
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemorySnapshotMetadata {
    regions: Vec<MemoryRegionSnapshotMetadata>,
    compressed: bool,
}

#[derive(Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
struct MemoryRegionSnapshotMetadata {
    guest_base: u64,
    size: usize,
    // Ranges of the mmap that are stored in the snapshot file. All other ranges of the region are
    // zeros.
    data_ranges: Vec<std::ops::Range<usize>>,
}

// SAFETY:
// It is safe to implement BackingMemory because GuestMemory can be mutated any time already.
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        // The memory regions are `[0x0, 0x10000)`, `[0x10000, 0x20000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        // Although each address in `[0x0, 0x20000)` is valid, `is_valid_range()` returns false for
        // a range that spans multiple underlying regions.
        assert!(gm.is_valid_range(GuestAddress(0x5000), 0x5000));
        assert!(gm.is_valid_range(GuestAddress(0x10000), 0x5000));
        assert!(!gm.is_valid_range(GuestAddress(0x5000), 0x10000));
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        assert!(GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x40000);
        // The memory regions are `[0x0, 0x20000)`, `[0x40000, 0x60000)`.
        let gm = GuestMemory::new(&[(start_addr1, 0x20000), (start_addr2, 0x20000)]).unwrap();

        assert!(gm.address_in_range(GuestAddress(0x10000)));
        assert!(!gm.address_in_range(GuestAddress(0x30000)));
        assert!(gm.address_in_range(GuestAddress(0x50000)));
        assert!(!gm.address_in_range(GuestAddress(0x60000)));
        assert!(gm.range_overlap(GuestAddress(0x10000), GuestAddress(0x30000)));
        assert!(!gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x40000)));
        assert!(gm.range_overlap(GuestAddress(0x30000), GuestAddress(0x70000)));
        assert_eq!(gm.checked_offset(GuestAddress(0x10000), 0x10000), None);
        assert_eq!(
            gm.checked_offset(GuestAddress(0x50000), 0x8000),
            Some(GuestAddress(0x58000))
        );
        assert_eq!(gm.checked_offset(GuestAddress(0x50000), 0x10000), None);
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x10000));
        assert!(gm.is_valid_range(GuestAddress(0x0), 0x20000));
        assert!(!gm.is_valid_range(GuestAddress(0x0), 0x20000 + 1));

        // While `checked_offset(GuestAddress(0x10000), 0x40000)` succeeds because 0x50000 is a
        // valid address, `is_valid_range(GuestAddress(0x10000), 0x40000)` returns `false`
        // because there is a hole inside of [0x10000, 0x50000).
        assert_eq!(
            gm.checked_offset(GuestAddress(0x10000), 0x40000),
            Some(GuestAddress(0x50000))
        );
        assert!(!gm.is_valid_range(GuestAddress(0x10000), 0x40000));
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let gm = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x10000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x10000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x10000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        Ok(mem.find_region(addr)?.0.as_ptr() as *const u8)
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn guest_to_host_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x10000);
        let mem = GuestMemory::new(&[(start_addr1, 0x10000), (start_addr2, 0x40000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address_range(start_addr1, 0x10000).unwrap();
        let host_addr2 = mem.get_host_address_range(start_addr2, 0x10000).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let host_addr3 = mem.get_host_address_range(start_addr2, 0x20000).unwrap();
        assert_eq!(host_addr3, addr2_base);

        // Check that a valid guest address with an invalid size returns an error.
        assert!(mem.get_host_address_range(start_addr1, 0x20000).is_err());

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address_range(bad_addr, 0x10000).is_err());
    }

    #[test]
    fn shm_offset() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x10000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x20000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        for region in gm.regions() {
            let shm = match region.shm {
                BackingObject::Shm(s) => s,
                _ => {
                    panic!("backing object isn't SharedMemory");
                }
            };
            let mmap = MemoryMappingBuilder::new(region.size)
                .from_shared_memory(shm)
                .offset(region.shm_offset)
                .build()
                .unwrap();

            if region.index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if region.index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }
        }
    }

    #[test]
    // Disabled for non-x86 because test infra uses qemu-user, which doesn't support MADV_REMOVE.
    #[cfg(target_arch = "x86_64")]
    fn snapshot_restore() {
        let regions = &[
            // Hole at start.
            (GuestAddress(0x0), 0x10000),
            // Hole at end.
            (GuestAddress(0x10000), 0x10000),
            // Hole in middle.
            (GuestAddress(0x20000), 0x10000),
            // All holes.
            (GuestAddress(0x30000), 0x10000),
            // No holes.
            (GuestAddress(0x40000), 0x1000),
        ];
        let writes = &[
            (GuestAddress(0x0FFF0), 1u64),
            (GuestAddress(0x10000), 2u64),
            (GuestAddress(0x29000), 3u64),
            (GuestAddress(0x40000), 4u64),
        ];

        let gm = GuestMemory::new(regions).unwrap();
        for &(addr, value) in writes {
            gm.write_obj_at_addr(value, addr).unwrap();
        }

        let mut data = tempfile::tempfile().unwrap();
        // SAFETY:
        // no vm is running
        let metadata_json = unsafe { gm.snapshot(&mut data, false).unwrap() };
        let metadata: MemorySnapshotMetadata =
            AnySnapshot::from_any(metadata_json.clone()).unwrap();

        #[cfg(unix)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x0F000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x01000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x09000..0x0A000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );
        // We can't detect the holes on Windows yet.
        #[cfg(windows)]
        assert_eq!(
            metadata,
            MemorySnapshotMetadata {
                regions: vec![
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x10000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x20000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x30000,
                        size: 0x10000,
                        data_ranges: vec![0x00000..0x10000],
                    },
                    MemoryRegionSnapshotMetadata {
                        guest_base: 0x40000,
                        size: 0x1000,
                        data_ranges: vec![0x00000..0x01000],
                    }
                ],
                compressed: false,
            }
        );

        std::mem::drop(gm);

        let gm2 = GuestMemory::new(regions).unwrap();

        // Write to a hole so we can assert the restore zeroes it.
        let hole_addr = GuestAddress(0x30000);
        gm2.write_obj_at_addr(8u64, hole_addr).unwrap();

        use std::io::Seek;
        data.seek(std::io::SeekFrom::Start(0)).unwrap();
        // SAFETY:
        // no vm is running
        unsafe { gm2.restore(metadata_json, &mut data).unwrap() };

        assert_eq!(gm2.read_obj_from_addr::<u64>(hole_addr).unwrap(), 0);
        for &(addr, value) in writes {
            assert_eq!(gm2.read_obj_from_addr::<u64>(addr).unwrap(), value);
        }
    }
}