1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 //
3 // Portions Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
4 //
5 // Portions Copyright 2017 The Chromium OS Authors. All rights reserved.
6 // Use of this source code is governed by a BSD-style license that can be
7 // found in the LICENSE-BSD-3-Clause file.
8 //
9 // SPDX-License-Identifier: Apache-2.0 OR BSD-3-Clause
10 
11 //! Traits to track and access the physical memory of the guest.
12 //!
13 //! To make the abstraction as generic as possible, all the core traits declared here only define
14 //! methods to access the guest's memory, and never define methods to manage (create, delete,
15 //! insert, remove, etc.) the guest's memory. This way, guest memory consumers (virtio device
16 //! drivers, vhost drivers, boot loaders, etc.) may be decoupled from the guest memory provider
17 //! (typically a hypervisor).
18 //!
19 //! Traits and Structs
20 //! - [`GuestAddress`](struct.GuestAddress.html): represents a guest physical address (GPA).
21 //! - [`MemoryRegionAddress`](struct.MemoryRegionAddress.html): represents an offset inside a
22 //!   region.
23 //! - [`GuestMemoryRegion`](trait.GuestMemoryRegion.html): represents a contiguous region of the
24 //!   guest's physical memory.
25 //! - [`GuestMemory`](trait.GuestMemory.html): represents a collection of `GuestMemoryRegion`
26 //!   objects.
27 //!   The main responsibilities of the `GuestMemory` trait are:
28 //!     - hide the details of accessing the guest's physical address space.
29 //!     - map a request address to a `GuestMemoryRegion` object and relay the request to it.
30 //!     - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
31 //!
32 //! Whenever a collection of `GuestMemoryRegion` objects is mutable,
33 //! [`GuestAddressSpace`](trait.GuestAddressSpace.html) should be implemented
34 //! for clients to obtain a [`GuestMemory`] reference or smart pointer.
35 //!
36 //! The `GuestMemoryRegion` trait has an associated `B: Bitmap` type which is used to handle
37 //! dirty bitmap tracking. Backends are free to define the granularity (or whether tracking is
38 //! actually performed at all). Those that do implement tracking functionality are expected to
39 //! ensure the correctness of the underlying `Bytes` implementation. The user has to explicitly
40 //! record (using the handle returned by `GuestRegionMmap::bitmap`) write accesses performed
41 //! via pointers, references, or slices returned by methods of `GuestMemory`, `GuestMemoryRegion`,
42 //! `VolatileSlice`, `VolatileRef`, or `VolatileArrayRef`.
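//!
//! For example, here is a minimal sketch (assuming the `backend-mmap` and `backend-bitmap`
//! features; the region size and offsets below are arbitrary) of manually recording a write
//! that was performed outside of the `Bytes` implementation:
//!
//! ```
//! # #[cfg(all(feature = "backend-mmap", feature = "backend-bitmap"))]
//! # {
//! # use vm_memory::bitmap::{AtomicBitmap, Bitmap};
//! # use vm_memory::{GuestAddress, GuestMemoryRegion, GuestRegionMmap};
//! let region = GuestRegionMmap::<AtomicBitmap>::from_range(GuestAddress(0x0), 0x400, None)
//!     .expect("Could not create guest memory region");
//!
//! // Suppose 4 bytes were just written at offset 0x200 through a raw host pointer or a
//! // `VolatileSlice`. Such a write is invisible to the `Bytes` implementation, so it has
//! // to be recorded explicitly against the region's bitmap.
//! region.bitmap().mark_dirty(0x200, 4);
//! assert!(region.bitmap().dirty_at(0x200));
//! # }
//! ```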
43 
44 use std::convert::From;
45 use std::fs::File;
46 use std::io::{self, Read, Write};
47 use std::ops::{BitAnd, BitOr, Deref};
48 use std::rc::Rc;
49 use std::sync::atomic::Ordering;
50 use std::sync::Arc;
51 
52 use crate::address::{Address, AddressValue};
53 use crate::bitmap::{Bitmap, BS, MS};
54 use crate::bytes::{AtomicAccess, Bytes};
55 use crate::io::{ReadVolatile, WriteVolatile};
56 use crate::volatile_memory::{self, VolatileSlice};
57 use crate::GuestMemoryError;
58 
59 static MAX_ACCESS_CHUNK: usize = 4096;
60 
61 /// Errors associated with handling guest memory accesses.
62 #[allow(missing_docs)]
63 #[derive(Debug, thiserror::Error)]
64 pub enum Error {
65     /// Failure in finding a guest address in any memory regions mapped by this guest.
66     #[error("Guest memory error: invalid guest address {}",.0.raw_value())]
67     InvalidGuestAddress(GuestAddress),
68     /// Couldn't read/write from the given source.
69     #[error("Guest memory error: {0}")]
70     IOError(io::Error),
71     /// Incomplete read or write.
72     #[error("Guest memory error: only used {completed} bytes in {expected} long buffer")]
73     PartialBuffer { expected: usize, completed: usize },
74     /// Requested backend address is out of range.
75     #[error("Guest memory error: invalid backend address")]
76     InvalidBackendAddress,
77     /// Host virtual address not available.
78     #[error("Guest memory error: host virtual address not available")]
79     HostAddressNotAvailable,
80     /// The length returned by the callback passed to `try_access` is outside the address range.
81     #[error(
82         "The length returned by the callback passed to `try_access` is outside the address range."
83     )]
84     CallbackOutOfRange,
85     /// The address to be read by `try_access` is outside the address range.
86     #[error("The address to be read by `try_access` is outside the address range")]
87     GuestAddressOverflow,
88 }
89 
90 impl From<volatile_memory::Error> for Error {
91     fn from(e: volatile_memory::Error) -> Self {
92         match e {
93             volatile_memory::Error::OutOfBounds { .. } => Error::InvalidBackendAddress,
94             volatile_memory::Error::Overflow { .. } => Error::InvalidBackendAddress,
95             volatile_memory::Error::TooBig { .. } => Error::InvalidBackendAddress,
96             volatile_memory::Error::Misaligned { .. } => Error::InvalidBackendAddress,
97             volatile_memory::Error::IOError(e) => Error::IOError(e),
98             volatile_memory::Error::PartialBuffer {
99                 expected,
100                 completed,
101             } => Error::PartialBuffer {
102                 expected,
103                 completed,
104             },
105         }
106     }
107 }
108 
109 /// Result of guest memory operations.
110 pub type Result<T> = std::result::Result<T, Error>;
111 
112 /// Represents a guest physical address (GPA).
113 ///
114 /// # Notes:
115 /// On ARM64, a 32-bit hypervisor may be used to support a 64-bit guest. For simplicity,
116 /// `u64` is used to store the raw value regardless of whether the guest is a 32-bit or 64-bit
117 /// virtual machine.
118 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
119 pub struct GuestAddress(pub u64);
120 impl_address_ops!(GuestAddress, u64);
121 
122 /// Represents an offset inside a region.
123 #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd)]
124 pub struct MemoryRegionAddress(pub u64);
125 impl_address_ops!(MemoryRegionAddress, u64);
126 
127 /// Type of the raw value stored in a `GuestAddress` object.
128 pub type GuestUsize = <GuestAddress as AddressValue>::V;
129 
130 /// Represents the start point within a `File` that backs a `GuestMemoryRegion`.
131 #[derive(Clone, Debug)]
132 pub struct FileOffset {
133     file: Arc<File>,
134     start: u64,
135 }
136 
137 impl FileOffset {
138     /// Creates a new `FileOffset` object.
139     pub fn new(file: File, start: u64) -> Self {
140         FileOffset::from_arc(Arc::new(file), start)
141     }
142 
143     /// Creates a new `FileOffset` object based on an existing `Arc<File>`.
144     pub fn from_arc(file: Arc<File>, start: u64) -> Self {
145         FileOffset { file, start }
146     }
147 
148     /// Returns a reference to the inner `File` object.
149     pub fn file(&self) -> &File {
150         self.file.as_ref()
151     }
152 
153     /// Returns a reference to the inner `Arc<File>` object.
154     pub fn arc(&self) -> &Arc<File> {
155         &self.file
156     }
157 
158     /// Returns the start offset within the file.
159     pub fn start(&self) -> u64 {
160         self.start
161     }
162 }
163 
164 /// Represents a contiguous region of guest physical memory.
165 #[allow(clippy::len_without_is_empty)]
166 pub trait GuestMemoryRegion: Bytes<MemoryRegionAddress, E = Error> {
167     /// Type used for dirty memory tracking.
168     type B: Bitmap;
169 
170     /// Returns the size of the region.
171     fn len(&self) -> GuestUsize;
172 
173     /// Returns the minimum (inclusive) address managed by the region.
174     fn start_addr(&self) -> GuestAddress;
175 
176     /// Returns the maximum (inclusive) address managed by the region.
177     fn last_addr(&self) -> GuestAddress {
178         // unchecked_add is safe as the region bounds were checked when it was created.
179         self.start_addr().unchecked_add(self.len() - 1)
180     }
181 
182     /// Borrow the associated `Bitmap` object.
183     fn bitmap(&self) -> &Self::B;
184 
185     /// Returns the given address if it is within this region.
186     fn check_address(&self, addr: MemoryRegionAddress) -> Option<MemoryRegionAddress> {
187         if self.address_in_range(addr) {
188             Some(addr)
189         } else {
190             None
191         }
192     }
193 
194     /// Returns `true` if the given address is within this region.
195     fn address_in_range(&self, addr: MemoryRegionAddress) -> bool {
196         addr.raw_value() < self.len()
197     }
198 
199     /// Returns the address plus the offset if it is in this region.
200     fn checked_offset(
201         &self,
202         base: MemoryRegionAddress,
203         offset: usize,
204     ) -> Option<MemoryRegionAddress> {
205         base.checked_add(offset as u64)
206             .and_then(|addr| self.check_address(addr))
207     }
208 
209     /// Tries to convert an absolute address to a relative address within this region.
210     ///
211     /// Returns `None` if `addr` is out of the bounds of this region.
212     fn to_region_addr(&self, addr: GuestAddress) -> Option<MemoryRegionAddress> {
213         addr.checked_offset_from(self.start_addr())
214             .and_then(|offset| self.check_address(MemoryRegionAddress(offset)))
215     }
216 
217     /// Returns the host virtual address corresponding to the region address.
218     ///
219     /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
220     /// have the capability to mmap guest address range into host virtual address space for
221     /// direct access, so the corresponding host virtual address may be passed to other subsystems.
222     ///
223     /// # Note
224     /// The underlying guest memory is not protected from memory aliasing, which breaks the
225     /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
226     /// concurrent accesses to the underlying guest memory.
227     fn get_host_address(&self, _addr: MemoryRegionAddress) -> Result<*mut u8> {
228         Err(Error::HostAddressNotAvailable)
229     }
230 
231     /// Returns information regarding the file and offset backing this memory region.
232     fn file_offset(&self) -> Option<&FileOffset> {
233         None
234     }
235 
236     /// Returns a slice corresponding to the data in the region.
237     ///
238     /// Returns `None` if the region does not support slice-based access.
239     ///
240     /// # Safety
241     ///
242     /// Unsafe because of possible aliasing.
243     #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
244     machine without violating aliasing rules "]
245     unsafe fn as_slice(&self) -> Option<&[u8]> {
246         None
247     }
248 
249     /// Returns a mutable slice corresponding to the data in the region.
250     ///
251     /// Returns `None` if the region does not support slice-based access.
252     ///
253     /// # Safety
254     ///
255     /// Unsafe because of possible aliasing. Mutable accesses performed through the
256     /// returned slice are not visible to the dirty bitmap tracking functionality of
257     /// the region, and must be manually recorded using the associated bitmap object.
258     #[deprecated = "It is impossible to use this function for accessing memory of a running virtual \
259     machine without violating aliasing rules "]
260     unsafe fn as_mut_slice(&self) -> Option<&mut [u8]> {
261         None
262     }
263 
264     /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
265     /// `offset`.
266     #[allow(unused_variables)]
267     fn get_slice(
268         &self,
269         offset: MemoryRegionAddress,
270         count: usize,
271     ) -> Result<VolatileSlice<BS<Self::B>>> {
272         Err(Error::HostAddressNotAvailable)
273     }
274 
275     /// Gets a slice of memory for the entire region that supports volatile access.
276     ///
277     /// # Examples (uses the `backend-mmap` feature)
278     ///
279     /// ```
280     /// # #[cfg(feature = "backend-mmap")]
281     /// # {
282     /// # use vm_memory::{GuestAddress, MmapRegion, GuestRegionMmap, GuestMemoryRegion};
283     /// # use vm_memory::volatile_memory::{VolatileMemory, VolatileSlice, VolatileRef};
284     /// #
285     /// let region = GuestRegionMmap::<()>::from_range(GuestAddress(0x0), 0x400, None)
286     ///     .expect("Could not create guest memory");
287     /// let slice = region
288     ///     .as_volatile_slice()
289     ///     .expect("Could not get volatile slice");
290     ///
291     /// let v = 42u32;
292     /// let r = slice
293     ///     .get_ref::<u32>(0x200)
294     ///     .expect("Could not get reference");
295     /// r.store(v);
296     /// assert_eq!(r.load(), v);
297     /// # }
298     /// ```
299     fn as_volatile_slice(&self) -> Result<VolatileSlice<BS<Self::B>>> {
300         self.get_slice(MemoryRegionAddress(0), self.len() as usize)
301     }
302 
303     /// Shows whether the region is based on `HugeTLBFS`.
304     /// Returns `Some(true)` if the region is backed by hugetlbfs.
305     /// `None` means that no information is available.
306     ///
307     /// # Examples (uses the `backend-mmap` feature)
308     ///
309     /// ```
310     /// # #[cfg(feature = "backend-mmap")]
311     /// # {
312     /// #   use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap, GuestRegionMmap};
313     /// let addr = GuestAddress(0x1000);
314     /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(addr, 0x1000)]).unwrap();
315     /// let r = mem.find_region(addr).unwrap();
316     /// assert_eq!(r.is_hugetlbfs(), None);
317     /// # }
318     /// ```
319     #[cfg(target_os = "linux")]
320     fn is_hugetlbfs(&self) -> Option<bool> {
321         None
322     }
323 }
324 
325 /// `GuestAddressSpace` provides a way to retrieve a `GuestMemory` object.
326 /// The vm-memory crate already provides trivial implementations for
327 /// references to `GuestMemory` or reference-counted `GuestMemory` objects,
328 /// but the trait can also be implemented by any other struct in order
329 /// to provide temporary access to a snapshot of the memory map.
330 ///
331 /// In order to support generic mutable memory maps, devices (or other things
332 /// that access memory) should store the memory as a `GuestAddressSpace<M>`.
333 /// This example shows that references can also be used as the `GuestAddressSpace`
334 /// implementation, providing a zero-cost abstraction whenever immutable memory
335 /// maps are sufficient.
336 ///
337 /// # Examples (uses the `backend-mmap` and `backend-atomic` features)
338 ///
339 /// ```
340 /// # #[cfg(feature = "backend-mmap")]
341 /// # {
342 /// # use std::sync::Arc;
343 /// # use vm_memory::{GuestAddress, GuestAddressSpace, GuestMemory, GuestMemoryMmap};
344 /// #
345 /// pub struct VirtioDevice<AS: GuestAddressSpace> {
346 ///     mem: Option<AS>,
347 /// }
348 ///
349 /// impl<AS: GuestAddressSpace> VirtioDevice<AS> {
350 ///     fn new() -> Self {
351 ///         VirtioDevice { mem: None }
352 ///     }
353 ///     fn activate(&mut self, mem: AS) {
354 ///         self.mem = Some(mem)
355 ///     }
356 /// }
357 ///
358 /// fn get_mmap() -> GuestMemoryMmap<()> {
359 ///     let start_addr = GuestAddress(0x1000);
360 ///     GuestMemoryMmap::from_ranges(&vec![(start_addr, 0x400)])
361 ///         .expect("Could not create guest memory")
362 /// }
363 ///
364 /// // Using `VirtioDevice` with an immutable GuestMemoryMmap:
365 /// let mut for_immutable_mmap = VirtioDevice::<&GuestMemoryMmap<()>>::new();
366 /// let mmap = get_mmap();
367 /// for_immutable_mmap.activate(&mmap);
368 /// let mut another = VirtioDevice::<&GuestMemoryMmap<()>>::new();
369 /// another.activate(&mmap);
370 ///
371 /// # #[cfg(feature = "backend-atomic")]
372 /// # {
373 /// # use vm_memory::GuestMemoryAtomic;
374 /// // Using `VirtioDevice` with a mutable GuestMemoryMmap:
375 /// let mut for_mutable_mmap = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
376 /// let atomic = GuestMemoryAtomic::new(get_mmap());
377 /// for_mutable_mmap.activate(atomic.clone());
378 /// let mut another = VirtioDevice::<GuestMemoryAtomic<GuestMemoryMmap<()>>>::new();
379 /// another.activate(atomic.clone());
380 ///
381 /// // atomic can be modified here...
382 /// # }
383 /// # }
384 /// ```
385 pub trait GuestAddressSpace {
386     /// The type that will be used to access guest memory.
387     type M: GuestMemory;
388 
389     /// A type that provides access to the memory.
390     type T: Clone + Deref<Target = Self::M>;
391 
392     /// Return an object (e.g. a reference or guard) that can be used
393     /// to access memory through this address space.  The object provides
394     /// a consistent snapshot of the memory map.
395     fn memory(&self) -> Self::T;
396 }
397 
398 impl<M: GuestMemory> GuestAddressSpace for &M {
399     type M = M;
400     type T = Self;
401 
402     fn memory(&self) -> Self {
403         self
404     }
405 }
406 
407 impl<M: GuestMemory> GuestAddressSpace for Rc<M> {
408     type M = M;
409     type T = Self;
410 
411     fn memory(&self) -> Self {
412         self.clone()
413     }
414 }
415 
416 impl<M: GuestMemory> GuestAddressSpace for Arc<M> {
417     type M = M;
418     type T = Self;
419 
420     fn memory(&self) -> Self {
421         self.clone()
422     }
423 }
424 
425 /// `GuestMemory` represents a container for an *immutable* collection of
426 /// `GuestMemoryRegion` objects.  `GuestMemory` provides the `Bytes<GuestAddress>`
427 /// trait to hide the details of accessing guest memory by physical address.
428 /// Interior mutability is not allowed for implementations of `GuestMemory` so
429 /// that they always provide a consistent view of the memory map.
430 ///
431 /// The responsibilities of the `GuestMemory` trait are:
432 /// - map a request address to a `GuestMemoryRegion` object and relay the request to it.
433 /// - handle cases where an access request spans two or more `GuestMemoryRegion` objects.
434 pub trait GuestMemory {
435     /// Type of objects hosted by the address space.
436     type R: GuestMemoryRegion;
437 
438     /// Returns the number of regions in the collection.
439     fn num_regions(&self) -> usize;
440 
441     /// Returns the region containing the specified address or `None`.
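    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch, using arbitrary guest addresses:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert!(mem.find_region(GuestAddress(0x1200)).is_some());
    /// // 0x1400 is one byte past the end of the single region above.
    /// assert!(mem.find_region(GuestAddress(0x1400)).is_none());
    /// # }
    /// ```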
442     fn find_region(&self, addr: GuestAddress) -> Option<&Self::R>;
443 
444     /// Perform the specified action on each region.
445     ///
446     /// It only walks the children of the current region and does not step into sub-regions.
447     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
448     fn with_regions<F, E>(&self, cb: F) -> std::result::Result<(), E>
449     where
450         F: Fn(usize, &Self::R) -> std::result::Result<(), E>,
451     {
452         for (index, region) in self.iter().enumerate() {
453             cb(index, region)?;
454         }
455         Ok(())
456     }
457 
458     /// Perform the specified action on each region mutably.
459     ///
460     /// It only walks the children of the current region and does not step into sub-regions.
461     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
462     fn with_regions_mut<F, E>(&self, mut cb: F) -> std::result::Result<(), E>
463     where
464         F: FnMut(usize, &Self::R) -> std::result::Result<(), E>,
465     {
466         for (index, region) in self.iter().enumerate() {
467             cb(index, region)?;
468         }
469         Ok(())
470     }
471 
472     /// Gets an iterator over the entries in the collection.
473     ///
474     /// # Examples
475     ///
476     /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
477     ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
478     ///   `backend-mmap` feature)
479     ///
480     /// ```
481     /// # #[cfg(feature = "backend-mmap")]
482     /// # {
483     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
484     /// #
485     /// let start_addr1 = GuestAddress(0x0);
486     /// let start_addr2 = GuestAddress(0x400);
487     /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
488     ///     .expect("Could not create guest memory");
489     ///
490     /// let total_size = gm
491     ///     .iter()
492     ///     .map(|region| region.len() / 1024)
493     ///     .fold(0, |acc, size| acc + size);
494     /// assert_eq!(3, total_size)
495     /// # }
496     /// ```
497     fn iter(&self) -> impl Iterator<Item = &Self::R>;
498 
499     /// Applies two functions, specified as callbacks, on the inner memory regions.
500     ///
501     /// # Arguments
502     /// * `init` - Starting value of the accumulator for the `foldf` function.
503     /// * `mapf` - "Map" function, applied to all the inner memory regions. It returns an array of
504     ///            the same size as the memory regions array, containing the function's results
505     ///            for each region.
506     /// * `foldf` - "Fold" function, applied to the array returned by `mapf`. It acts as an
507     ///             operator, applying itself to the `init` value and to each subsequent element
508     ///             in the array returned by `mapf`.
509     ///
510     /// # Examples
511     ///
512     /// * Compute the total size of all memory mappings in KB by iterating over the memory regions
513     ///   and dividing their sizes by 1024, then summing up the values in an accumulator. (uses the
514     ///   `backend-mmap` feature)
515     ///
516     /// ```
517     /// # #[cfg(feature = "backend-mmap")]
518     /// # {
519     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryRegion, GuestMemoryMmap};
520     /// #
521     /// let start_addr1 = GuestAddress(0x0);
522     /// let start_addr2 = GuestAddress(0x400);
523     /// let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr1, 1024), (start_addr2, 2048)])
524     ///     .expect("Could not create guest memory");
525     ///
526     /// let total_size = gm.map_and_fold(0, |(_, region)| region.len() / 1024, |acc, size| acc + size);
527     /// assert_eq!(3, total_size)
528     /// # }
529     /// ```
530     #[deprecated(since = "0.6.0", note = "Use `.iter()` instead")]
531     fn map_and_fold<F, G, T>(&self, init: T, mapf: F, foldf: G) -> T
532     where
533         F: Fn((usize, &Self::R)) -> T,
534         G: Fn(T, T) -> T,
535     {
536         self.iter().enumerate().map(mapf).fold(init, foldf)
537     }
538 
539     /// Returns the maximum (inclusive) address managed by the
540     /// [`GuestMemory`](trait.GuestMemory.html).
541     ///
542     /// # Examples (uses the `backend-mmap` feature)
543     ///
544     /// ```
545     /// # #[cfg(feature = "backend-mmap")]
546     /// # {
547     /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap};
548     /// #
549     /// let start_addr = GuestAddress(0x1000);
550     /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
551     ///     .expect("Could not create guest memory");
552     ///
553     /// assert_eq!(start_addr.checked_add(0x3ff), Some(gm.last_addr()));
554     /// # }
555     /// ```
556     fn last_addr(&self) -> GuestAddress {
557         self.iter()
558             .map(GuestMemoryRegion::last_addr)
559             .fold(GuestAddress(0), std::cmp::max)
560     }
561 
562     /// Tries to convert an absolute address to a relative address within the corresponding region.
563     ///
564     /// Returns `None` if `addr` isn't present within the memory of the guest.
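    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch with an arbitrary region layout:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{Address, GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let (_region, region_addr) = mem.to_region_addr(GuestAddress(0x1010)).unwrap();
    /// // 0x1010 is 0x10 bytes past the start of the region beginning at 0x1000.
    /// assert_eq!(region_addr.raw_value(), 0x10);
    /// # }
    /// ```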
565     fn to_region_addr(&self, addr: GuestAddress) -> Option<(&Self::R, MemoryRegionAddress)> {
566         self.find_region(addr)
567             .map(|r| (r, r.to_region_addr(addr).unwrap()))
568     }
569 
570     /// Returns `true` if the given address is present within the memory of the guest.
571     fn address_in_range(&self, addr: GuestAddress) -> bool {
572         self.find_region(addr).is_some()
573     }
574 
575     /// Returns the given address if it is present within the memory of the guest.
576     fn check_address(&self, addr: GuestAddress) -> Option<GuestAddress> {
577         self.find_region(addr).map(|_| addr)
578     }
579 
580     /// Check whether the range [base, base + len) is valid.
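    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch with a single 0x400-byte region at an arbitrary address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert!(mem.check_range(GuestAddress(0x1000), 0x400));
    /// // The last byte of this range falls outside the region.
    /// assert!(!mem.check_range(GuestAddress(0x1000), 0x401));
    /// # }
    /// ```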
581     fn check_range(&self, base: GuestAddress, len: usize) -> bool {
582         match self.try_access(len, base, |_, count, _, _| -> Result<usize> { Ok(count) }) {
583             Ok(count) => count == len,
584             _ => false,
585         }
586     }
587 
588     /// Returns the address plus the offset if it is present within the memory of the guest.
589     fn checked_offset(&self, base: GuestAddress, offset: usize) -> Option<GuestAddress> {
590         base.checked_add(offset as u64)
591             .and_then(|addr| self.check_address(addr))
592     }
593 
594     /// Invokes callback `f` to handle data in the address range `[addr, addr + count)`.
595     ///
596     /// The address range `[addr, addr + count)` may span more than one
597     /// [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object, or even have holes in it.
598     /// So [`try_access()`](trait.GuestMemory.html#method.try_access) invokes the callback 'f'
599     /// for each [`GuestMemoryRegion`](trait.GuestMemoryRegion.html) object involved and returns:
600     /// - the error code returned by the callback 'f'
601     /// - the size of the already handled data when encountering the first hole
602     /// - the size of the already handled data when the whole range has been handled
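    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch that walks a range without copying any data; the range and region
    /// layout are arbitrary:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x0), 0x400)]).unwrap();
    /// // The callback reports every chunk as fully handled, so `try_access` simply returns
    /// // how many bytes of the requested range are backed by memory.
    /// let handled = mem
    ///     .try_access(0x100, GuestAddress(0x200), |_offset, count, _region_addr, _region| {
    ///         Ok(count)
    ///     })
    ///     .unwrap();
    /// assert_eq!(handled, 0x100);
    /// # }
    /// ```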
603     fn try_access<F>(&self, count: usize, addr: GuestAddress, mut f: F) -> Result<usize>
604     where
605         F: FnMut(usize, usize, MemoryRegionAddress, &Self::R) -> Result<usize>,
606     {
607         let mut cur = addr;
608         let mut total = 0;
609         while let Some(region) = self.find_region(cur) {
610             let start = region.to_region_addr(cur).unwrap();
611             let cap = region.len() - start.raw_value();
612             let len = std::cmp::min(cap, (count - total) as GuestUsize);
613             match f(total, len as usize, start, region) {
614                 // no more data
615                 Ok(0) => return Ok(total),
616                 // made some progress
617                 Ok(len) => {
618                     total = match total.checked_add(len) {
619                         Some(x) if x < count => x,
620                         Some(x) if x == count => return Ok(x),
621                         _ => return Err(Error::CallbackOutOfRange),
622                     };
623                     cur = match cur.overflowing_add(len as GuestUsize) {
624                         (x @ GuestAddress(0), _) | (x, false) => x,
625                         (_, true) => return Err(Error::GuestAddressOverflow),
626                     };
627                 }
628                 // error happened
629                 e => return e,
630             }
631         }
632         if total == 0 {
633             Err(Error::InvalidGuestAddress(addr))
634         } else {
635             Ok(total)
636         }
637     }
638 
639     /// Reads up to `count` bytes from an object and writes them into guest memory at `addr`.
640     ///
641     /// Returns the number of bytes written into guest memory.
642     ///
643     /// # Arguments
644     /// * `addr` - Begin writing at this address.
645     /// * `src` - Copy from `src` into the container.
646     /// * `count` - Copy `count` bytes from `src` into the container.
647     ///
648     /// # Examples
649     ///
650     /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
651     ///
652     /// ```
653     /// # #[cfg(feature = "backend-mmap")]
654     /// # {
655     /// # use vm_memory::{Address, GuestMemory, Bytes, GuestAddress, GuestMemoryMmap};
656     /// # use std::fs::File;
657     /// # use std::path::Path;
658     /// #
659     /// # let start_addr = GuestAddress(0x1000);
660     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
661     /// #    .expect("Could not create guest memory");
662     /// # let addr = GuestAddress(0x1010);
663     /// # let mut file = if cfg!(unix) {
664     /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
665     /// #   file
666     /// # } else {
667     /// #   File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
668     /// #       .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
669     /// # };
670     ///
671     /// gm.read_volatile_from(addr, &mut file, 128)
672     ///     .expect("Could not read from /dev/urandom into guest memory");
673     ///
674     /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
675     /// let rand_val: u32 = gm
676     ///     .read_obj(read_addr)
677     ///     .expect("Could not read u32 val from /dev/urandom");
678     /// # }
679     /// ```
680     fn read_volatile_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
681     where
682         F: ReadVolatile,
683     {
684         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
685             // Check if something bad happened before doing unsafe things.
686             assert!(offset <= count);
687 
688             let mut vslice = region.get_slice(caddr, len)?;
689 
690             src.read_volatile(&mut vslice)
691                 .map_err(GuestMemoryError::from)
692         })
693     }
694 
695     /// Reads up to `count` bytes from guest memory at `addr` and writes them into an object.
696     ///
697     /// Returns the number of bytes copied from guest memory.
698     ///
699     /// # Arguments
700     /// * `addr` - Begin reading from this address.
701     /// * `dst` - Copy from guest memory to `dst`.
702     /// * `count` - Copy `count` bytes from guest memory to `dst`.
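    ///
    /// # Examples
    ///
    /// * Copy 128 bytes of guest memory into a local buffer (uses the `backend-mmap` feature);
    ///   a minimal sketch with an arbitrary start address:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let start_addr = GuestAddress(0x1000);
    /// let gm = GuestMemoryMmap::<()>::from_ranges(&[(start_addr, 0x400)])
    ///     .expect("Could not create guest memory");
    ///
    /// let mut buf = [0u8; 128];
    /// let copied = gm
    ///     .write_volatile_to(start_addr, &mut buf.as_mut_slice(), 128)
    ///     .expect("Could not copy guest memory into the buffer");
    /// assert_eq!(copied, 128);
    /// # }
    /// ```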
703     fn write_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
704     where
705         F: WriteVolatile,
706     {
707         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
708             // Check if something bad happened before doing unsafe things.
709             assert!(offset <= count);
710 
711             let vslice = region.get_slice(caddr, len)?;
712 
713             // For a non-RAM region, reading could have side effects, so we
714             // must use write_all().
715             dst.write_all_volatile(&vslice)?;
716 
717             Ok(len)
718         })
719     }
720 
721     /// Reads exactly `count` bytes from an object and writes them into guest memory at `addr`.
722     ///
723     /// # Errors
724     ///
725     /// Returns an error if `count` bytes could not be copied from `src` to guest memory.
726     /// Part of the data may have been copied nevertheless.
727     ///
728     /// # Arguments
729     /// * `addr` - Begin writing at this address.
730     /// * `src` - Copy from `src` into guest memory.
731     /// * `count` - Copy exactly `count` bytes from `src` into guest memory.
732     fn read_exact_volatile_from<F>(
733         &self,
734         addr: GuestAddress,
735         src: &mut F,
736         count: usize,
737     ) -> Result<()>
738     where
739         F: ReadVolatile,
740     {
741         let res = self.read_volatile_from(addr, src, count)?;
742         if res != count {
743             return Err(Error::PartialBuffer {
744                 expected: count,
745                 completed: res,
746             });
747         }
748         Ok(())
749     }
750 
751     /// Reads exactly `count` bytes from guest memory at `addr` and writes them into an object.
752     ///
753     /// # Errors
754     ///
755     /// Returns an error if `count` bytes could not be copied from guest memory to `dst`.
756     /// Part of the data may have been copied nevertheless.
757     ///
758     /// # Arguments
759     /// * `addr` - Begin reading from this address.
760     /// * `dst` - Copy from guest memory to `dst`.
761     /// * `count` - Copy exactly `count` bytes from guest memory to `dst`.
762     fn write_all_volatile_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
763     where
764         F: WriteVolatile,
765     {
766         let res = self.write_volatile_to(addr, dst, count)?;
767         if res != count {
768             return Err(Error::PartialBuffer {
769                 expected: count,
770                 completed: res,
771             });
772         }
773         Ok(())
774     }
775 
776     /// Gets the host virtual address corresponding to the guest address.
777     ///
778     /// Some [`GuestMemory`](trait.GuestMemory.html) implementations, like `GuestMemoryMmap`,
779     /// have the capability to mmap the guest address range into virtual address space of the host
780     /// for direct access, so the corresponding host virtual address may be passed to other
781     /// subsystems.
782     ///
783     /// # Note
784     /// The underlying guest memory is not protected from memory aliasing, which breaks the
785     /// Rust memory safety model. It's the caller's responsibility to ensure that there's no
786     /// concurrent accesses to the underlying guest memory.
787     ///
788     /// # Arguments
789     /// * `addr` - Guest address to convert.
790     ///
791     /// # Examples (uses the `backend-mmap` feature)
792     ///
793     /// ```
794     /// # #[cfg(feature = "backend-mmap")]
795     /// # {
796     /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
797     /// #
798     /// # let start_addr = GuestAddress(0x1000);
799     /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x500)])
800     /// #    .expect("Could not create guest memory");
801     /// #
802     /// let addr = gm
803     ///     .get_host_address(GuestAddress(0x1200))
804     ///     .expect("Could not get host address");
805     /// println!("Host address is {:p}", addr);
806     /// # }
807     /// ```
808     fn get_host_address(&self, addr: GuestAddress) -> Result<*mut u8> {
809         self.to_region_addr(addr)
810             .ok_or(Error::InvalidGuestAddress(addr))
811             .and_then(|(r, addr)| r.get_host_address(addr))
812     }
813 
814     /// Returns a [`VolatileSlice`](struct.VolatileSlice.html) of `count` bytes starting at
815     /// `addr`.
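    ///
    /// # Examples (uses the `backend-mmap` feature)
    ///
    /// A minimal sketch mirroring the region-level example above; the address and length are
    /// arbitrary:
    ///
    /// ```
    /// # #[cfg(feature = "backend-mmap")]
    /// # {
    /// # use vm_memory::volatile_memory::VolatileMemory;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryMmap};
    /// let mem = GuestMemoryMmap::<()>::from_ranges(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let slice = mem
    ///     .get_slice(GuestAddress(0x1200), 0x100)
    ///     .expect("Could not get volatile slice");
    ///
    /// let r = slice.get_ref::<u32>(0x0).expect("Could not get reference");
    /// r.store(42u32);
    /// assert_eq!(r.load(), 42u32);
    /// # }
    /// ```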
816     fn get_slice(&self, addr: GuestAddress, count: usize) -> Result<VolatileSlice<MS<Self>>> {
817         self.to_region_addr(addr)
818             .ok_or(Error::InvalidGuestAddress(addr))
819             .and_then(|(r, addr)| r.get_slice(addr, count))
820     }
821 }
822 
823 impl<T: GuestMemory + ?Sized> Bytes<GuestAddress> for T {
824     type E = Error;
825 
826     fn write(&self, buf: &[u8], addr: GuestAddress) -> Result<usize> {
827         self.try_access(
828             buf.len(),
829             addr,
830             |offset, _count, caddr, region| -> Result<usize> {
831                 region.write(&buf[offset..], caddr)
832             },
833         )
834     }
835 
836     fn read(&self, buf: &mut [u8], addr: GuestAddress) -> Result<usize> {
837         self.try_access(
838             buf.len(),
839             addr,
840             |offset, _count, caddr, region| -> Result<usize> {
841                 region.read(&mut buf[offset..], caddr)
842             },
843         )
844     }
845 
846     /// # Examples
847     ///
848     /// * Write a slice at guest address 0x1000. (uses the `backend-mmap` feature)
849     ///
850     /// ```
851     /// # #[cfg(feature = "backend-mmap")]
852     /// # {
853     /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
854     /// #
855     /// # let start_addr = GuestAddress(0x1000);
856     /// # let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
857     /// #    .expect("Could not create guest memory");
858     /// #
859     /// gm.write_slice(&[1, 2, 3, 4, 5], start_addr)
860     ///     .expect("Could not write slice to guest memory");
861     /// # }
862     /// ```
863     fn write_slice(&self, buf: &[u8], addr: GuestAddress) -> Result<()> {
864         let res = self.write(buf, addr)?;
865         if res != buf.len() {
866             return Err(Error::PartialBuffer {
867                 expected: buf.len(),
868                 completed: res,
869             });
870         }
871         Ok(())
872     }
873 
874     /// # Examples
875     ///
876     /// * Read a slice of length 16 at guest address 0x1000. (uses the `backend-mmap` feature)
877     ///
878     /// ```
879     /// # #[cfg(feature = "backend-mmap")]
880     /// # {
881     /// # use vm_memory::{Bytes, GuestAddress, mmap::GuestMemoryMmap};
882     /// #
883     /// let start_addr = GuestAddress(0x1000);
884     /// let mut gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
885     ///     .expect("Could not create guest memory");
886     /// let buf = &mut [0u8; 16];
887     ///
888     /// gm.read_slice(buf, start_addr)
889     ///     .expect("Could not read slice from guest memory");
890     /// # }
891     /// ```
892     fn read_slice(&self, buf: &mut [u8], addr: GuestAddress) -> Result<()> {
893         let res = self.read(buf, addr)?;
894         if res != buf.len() {
895             return Err(Error::PartialBuffer {
896                 expected: buf.len(),
897                 completed: res,
898             });
899         }
900         Ok(())
901     }
902 
903     /// # Examples
904     ///
905     /// * Read bytes from /dev/urandom (uses the `backend-mmap` feature)
906     ///
907     /// ```
908     /// # #[cfg(feature = "backend-mmap")]
909     /// # {
910     /// # use vm_memory::{Address, Bytes, GuestAddress, GuestMemoryMmap};
911     /// # use std::fs::File;
912     /// # use std::path::Path;
913     /// #
914     /// # let start_addr = GuestAddress(0x1000);
915     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 0x400)])
916     /// #    .expect("Could not create guest memory");
917     /// # let addr = GuestAddress(0x1010);
918     /// # let mut file = if cfg!(unix) {
919     /// let mut file = File::open(Path::new("/dev/urandom")).expect("Could not open /dev/urandom");
920     /// #   file
921     /// # } else {
922     /// #   File::open(Path::new("c:\\Windows\\system32\\ntoskrnl.exe"))
923     /// #       .expect("Could not open c:\\Windows\\system32\\ntoskrnl.exe")
924     /// # };
925     ///
926     /// gm.read_from(addr, &mut file, 128)
927     ///     .expect("Could not read from /dev/urandom into guest memory");
928     ///
929     /// let read_addr = addr.checked_add(8).expect("Could not compute read address");
930     /// let rand_val: u32 = gm
931     ///     .read_obj(read_addr)
932     ///     .expect("Could not read u32 val from /dev/urandom");
933     /// # }
934     /// ```
935     fn read_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<usize>
936     where
937         F: Read,
938     {
939         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
940             // Check if something bad happened before doing unsafe things.
941             assert!(offset <= count);
942 
943             let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
944             let mut buf = vec![0u8; len].into_boxed_slice();
945 
946             loop {
947                 match src.read(&mut buf[..]) {
948                     Ok(bytes_read) => {
949                         // We don't need to update the dirty bitmap manually here because it's
950                         // expected to be handled by the logic within the `Bytes`
951                         // implementation for the region object.
952                         let bytes_written = region.write(&buf[0..bytes_read], caddr)?;
953                         assert_eq!(bytes_written, bytes_read);
954                         break Ok(bytes_read);
955                     }
956                     Err(ref e) if e.kind() == std::io::ErrorKind::Interrupted => continue,
957                     Err(e) => break Err(Error::IOError(e)),
958                 }
959             }
960         })
961     }
962 
963     fn read_exact_from<F>(&self, addr: GuestAddress, src: &mut F, count: usize) -> Result<()>
964     where
965         F: Read,
966     {
967         #[allow(deprecated)] // this function itself is deprecated
968         let res = self.read_from(addr, src, count)?;
969         if res != count {
970             return Err(Error::PartialBuffer {
971                 expected: count,
972                 completed: res,
973             });
974         }
975         Ok(())
976     }
977 
978     /// # Examples
979     ///
980     /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
981     ///
982     /// ```
983     /// # #[cfg(not(unix))]
984     /// # extern crate vmm_sys_util;
985     /// # #[cfg(feature = "backend-mmap")]
986     /// # {
987     /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
988     /// #
989     /// # let start_addr = GuestAddress(0x1000);
990     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
991     /// #    .expect("Could not create guest memory");
992     /// # let mut file = if cfg!(unix) {
993     /// # use std::fs::OpenOptions;
994     /// let mut file = OpenOptions::new()
995     ///     .write(true)
996     ///     .open("/dev/null")
997     ///     .expect("Could not open /dev/null");
998     /// #   file
999     /// # } else {
1000     /// #   use vmm_sys_util::tempfile::TempFile;
1001     /// #   TempFile::new().unwrap().into_file()
1002     /// # };
1003     ///
1004     /// gm.write_to(start_addr, &mut file, 128)
1005     ///     .expect("Could not write 128 bytes to the provided address");
1006     /// # }
1007     /// ```
1008     fn write_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<usize>
1009     where
1010         F: Write,
1011     {
1012         self.try_access(count, addr, |offset, len, caddr, region| -> Result<usize> {
1013             // Check if something bad happened before doing unsafe things.
1014             assert!(offset <= count);
1015 
1016             let len = std::cmp::min(len, MAX_ACCESS_CHUNK);
1017             let mut buf = vec![0u8; len].into_boxed_slice();
1018             let bytes_read = region.read(&mut buf, caddr)?;
1019             assert_eq!(bytes_read, len);
1020             // For a non-RAM region, reading could have side effects, so we
1021             // must use write_all().
1022             dst.write_all(&buf).map_err(Error::IOError)?;
1023             Ok(len)
1024         })
1025     }
1026 
1027     /// # Examples
1028     ///
1029     /// * Write 128 bytes to /dev/null (uses the `backend-mmap` feature)
1030     ///
1031     /// ```
1032     /// # #[cfg(not(unix))]
1033     /// # extern crate vmm_sys_util;
1034     /// # #[cfg(feature = "backend-mmap")]
1035     /// # {
1036     /// # use vm_memory::{Bytes, GuestAddress, GuestMemoryMmap};
1037     /// #
1038     /// # let start_addr = GuestAddress(0x1000);
1039     /// # let gm = GuestMemoryMmap::<()>::from_ranges(&vec![(start_addr, 1024)])
1040     /// #    .expect("Could not create guest memory");
1041     /// # let mut file = if cfg!(unix) {
1042     /// # use std::fs::OpenOptions;
1043     /// let mut file = OpenOptions::new()
1044     ///     .write(true)
1045     ///     .open("/dev/null")
1046     ///     .expect("Could not open /dev/null");
1047     /// #   file
1048     /// # } else {
1049     /// #   use vmm_sys_util::tempfile::TempFile;
1050     /// #   TempFile::new().unwrap().into_file()
1051     /// # };
1052     ///
1053     /// gm.write_all_to(start_addr, &mut file, 128)
1054     ///     .expect("Could not write 128 bytes to the provided address");
1055     /// # }
1056     /// ```
1057     fn write_all_to<F>(&self, addr: GuestAddress, dst: &mut F, count: usize) -> Result<()>
1058     where
1059         F: Write,
1060     {
1061         #[allow(deprecated)] // this function itself is deprecated
1062         let res = self.write_to(addr, dst, count)?;
1063         if res != count {
1064             return Err(Error::PartialBuffer {
1065                 expected: count,
1066                 completed: res,
1067             });
1068         }
1069         Ok(())
1070     }
1071 
1072     fn store<O: AtomicAccess>(&self, val: O, addr: GuestAddress, order: Ordering) -> Result<()> {
1073         // `find_region` should really do what `to_region_addr` is doing right now, except
1074         // it should keep returning a `Result`.
1075         self.to_region_addr(addr)
1076             .ok_or(Error::InvalidGuestAddress(addr))
1077             .and_then(|(region, region_addr)| region.store(val, region_addr, order))
1078     }
1079 
1080     fn load<O: AtomicAccess>(&self, addr: GuestAddress, order: Ordering) -> Result<O> {
1081         self.to_region_addr(addr)
1082             .ok_or(Error::InvalidGuestAddress(addr))
1083             .and_then(|(region, region_addr)| region.load(region_addr, order))
1084     }
1085 }
1086 
1087 #[cfg(test)]
1088 mod tests {
1089     #![allow(clippy::undocumented_unsafe_blocks)]
1090     use super::*;
1091     #[cfg(feature = "backend-mmap")]
1092     use crate::bytes::ByteValued;
1093     #[cfg(feature = "backend-mmap")]
1094     use crate::GuestAddress;
1095     #[cfg(feature = "backend-mmap")]
1096     use std::time::{Duration, Instant};
1097 
1098     use vmm_sys_util::tempfile::TempFile;
1099 
1100     #[cfg(feature = "backend-mmap")]
1101     type GuestMemoryMmap = crate::GuestMemoryMmap<()>;
1102 
1103     #[cfg(feature = "backend-mmap")]
1104     fn make_image(size: u8) -> Vec<u8> {
1105         let mut image: Vec<u8> = Vec::with_capacity(size as usize);
1106         for i in 0..size {
1107             image.push(i);
1108         }
1109         image
1110     }
1111 
1112     #[test]
1113     fn test_file_offset() {
1114         let file = TempFile::new().unwrap().into_file();
1115         let start = 1234;
1116         let file_offset = FileOffset::new(file, start);
1117         assert_eq!(file_offset.start(), start);
1118         assert_eq!(
1119             file_offset.file() as *const File,
1120             file_offset.arc().as_ref() as *const File
1121         );
1122     }
1123 
1124     #[cfg(feature = "backend-mmap")]
1125     #[test]
1126     fn checked_read_from() {
1127         let start_addr1 = GuestAddress(0x0);
1128         let start_addr2 = GuestAddress(0x40);
1129         let mem = GuestMemoryMmap::from_ranges(&[(start_addr1, 64), (start_addr2, 64)]).unwrap();
1130         let image = make_image(0x80);
1131         let offset = GuestAddress(0x30);
1132         let count: usize = 0x20;
1133         assert_eq!(
1134             0x20_usize,
1135             mem.read_volatile_from(offset, &mut image.as_slice(), count)
1136                 .unwrap()
1137         );
1138     }
1139 
1140     // Runs the provided closure in a loop, until at least `duration` time units have elapsed.
1141     #[cfg(feature = "backend-mmap")]
1142     fn loop_timed<F>(duration: Duration, mut f: F)
1143     where
1144         F: FnMut(),
1145     {
1146         // We check the time every `CHECK_PERIOD` iterations.
1147         const CHECK_PERIOD: u64 = 1_000_000;
1148         let start_time = Instant::now();
1149 
1150         loop {
1151             for _ in 0..CHECK_PERIOD {
1152                 f();
1153             }
1154             if start_time.elapsed() >= duration {
1155                 break;
1156             }
1157         }
1158     }
1159 
1160     // Helper method for the following test. It spawns a writer and a reader thread, which
1161     // simultaneously try to access an object that is placed at the junction of two memory regions.
1162     // The part of the object that's continuously accessed is a member of type T. The writer
1163     // flips all the bits of the member with every write, while the reader checks that every byte
1164     // has the same value (and thus it did not do a non-atomic access). The test succeeds if
1165     // no mismatch is detected after performing accesses for a pre-determined amount of time.
1166     #[cfg(feature = "backend-mmap")]
1167     #[cfg(not(miri))] // This test simulates a race condition between guest and vmm
1168     fn non_atomic_access_helper<T>()
1169     where
1170         T: ByteValued
1171             + std::fmt::Debug
1172             + From<u8>
1173             + Into<u128>
1174             + std::ops::Not<Output = T>
1175             + PartialEq,
1176     {
1177         use std::mem;
1178         use std::thread;
1179 
1180         // A dummy type that's always going to have the same alignment as the first member,
1181         // and then adds some bytes at the end.
1182         #[derive(Clone, Copy, Debug, Default, PartialEq)]
1183         struct Data<T> {
1184             val: T,
1185             some_bytes: [u8; 8],
1186         }
1187 
1188         // Some sanity checks.
1189         assert_eq!(mem::align_of::<T>(), mem::align_of::<Data<T>>());
1190         assert_eq!(mem::size_of::<T>(), mem::align_of::<T>());
1191 
1192         // There must be no padding bytes, as otherwise implementing ByteValued is UB
1193         assert_eq!(mem::size_of::<Data<T>>(), mem::size_of::<T>() + 8);
1194 
1195         unsafe impl<T: ByteValued> ByteValued for Data<T> {}
1196 
1197         // Start of first guest memory region.
1198         let start = GuestAddress(0);
1199         let region_len = 1 << 12;
1200 
1201         // The address where we start writing/reading a Data<T> value.
1202         let data_start = GuestAddress((region_len - mem::size_of::<T>()) as u64);
1203 
1204         let mem = GuestMemoryMmap::from_ranges(&[
1205             (start, region_len),
1206             (start.unchecked_add(region_len as u64), region_len),
1207         ])
1208         .unwrap();
1209 
1210         // Need to clone this and move it into the new thread we create.
1211         let mem2 = mem.clone();
1212         // Just some bytes.
1213         let some_bytes = [1u8, 2, 4, 16, 32, 64, 128, 255];
1214 
1215         let mut data = Data {
1216             val: T::from(0u8),
1217             some_bytes,
1218         };
1219 
1220         // Simple check that cross-region write/read is ok.
1221         mem.write_obj(data, data_start).unwrap();
1222         let read_data = mem.read_obj::<Data<T>>(data_start).unwrap();
1223         assert_eq!(read_data, data);
1224 
1225         let t = thread::spawn(move || {
1226             let mut count: u64 = 0;
1227 
1228             loop_timed(Duration::from_secs(3), || {
1229                 let data = mem2.read_obj::<Data<T>>(data_start).unwrap();
1230 
1231                 // Every time data is written to memory by the other thread, the value of
1232                 // data.val alternates between 0 and T::MAX, so the inner bytes should always
1233                 // have the same value. If they don't match, it means we read a partial value,
1234                 // so the access was not atomic.
1235                 let bytes = data.val.into().to_le_bytes();
1236                 for i in 1..mem::size_of::<T>() {
1237                     if bytes[0] != bytes[i] {
1238                         panic!(
1239                             "val bytes don't match {:?} after {} iterations",
1240                             &bytes[..mem::size_of::<T>()],
1241                             count
1242                         );
1243                     }
1244                 }
1245                 count += 1;
1246             });
1247         });
1248 
1249         // Write the object while flipping the bits of data.val over and over again.
1250         loop_timed(Duration::from_secs(3), || {
1251             mem.write_obj(data, data_start).unwrap();
1252             data.val = !data.val;
1253         });
1254 
1255         t.join().unwrap()
1256     }
1257 
1258     #[cfg(feature = "backend-mmap")]
1259     #[test]
1260     #[cfg(not(miri))]
1261     fn test_non_atomic_access() {
1262         non_atomic_access_helper::<u16>()
1263     }
1264 
1265     #[cfg(feature = "backend-mmap")]
1266     #[test]
1267     fn test_zero_length_accesses() {
1268         #[derive(Default, Clone, Copy)]
1269         #[repr(C)]
1270         struct ZeroSizedStruct {
1271             dummy: [u32; 0],
1272         }
1273 
1274         unsafe impl ByteValued for ZeroSizedStruct {}
1275 
1276         let addr = GuestAddress(0x1000);
1277         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1278         let obj = ZeroSizedStruct::default();
1279         let mut image = make_image(0x80);
1280 
1281         assert_eq!(mem.write(&[], addr).unwrap(), 0);
1282         assert_eq!(mem.read(&mut [], addr).unwrap(), 0);
1283 
1284         assert!(mem.write_slice(&[], addr).is_ok());
1285         assert!(mem.read_slice(&mut [], addr).is_ok());
1286 
1287         assert!(mem.write_obj(obj, addr).is_ok());
1288         assert!(mem.read_obj::<ZeroSizedStruct>(addr).is_ok());
1289 
1290         assert_eq!(
1291             mem.read_volatile_from(addr, &mut image.as_slice(), 0)
1292                 .unwrap(),
1293             0
1294         );
1295 
1296         assert!(mem
1297             .read_exact_volatile_from(addr, &mut image.as_slice(), 0)
1298             .is_ok());
1299 
1300         assert_eq!(
1301             mem.write_volatile_to(addr, &mut image.as_mut_slice(), 0)
1302                 .unwrap(),
1303             0
1304         );
1305 
1306         assert!(mem
1307             .write_all_volatile_to(addr, &mut image.as_mut_slice(), 0)
1308             .is_ok());
1309     }
1310 
1311     #[cfg(feature = "backend-mmap")]
1312     #[test]
1313     fn test_atomic_accesses() {
1314         let addr = GuestAddress(0x1000);
1315         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1316         let bad_addr = addr.unchecked_add(0x1000);
1317 
1318         crate::bytes::tests::check_atomic_accesses(mem, addr, bad_addr);
1319     }
1320 
1321     #[cfg(feature = "backend-mmap")]
1322     #[cfg(target_os = "linux")]
1323     #[test]
1324     fn test_guest_memory_mmap_is_hugetlbfs() {
1325         let addr = GuestAddress(0x1000);
1326         let mem = GuestMemoryMmap::from_ranges(&[(addr, 0x1000)]).unwrap();
1327         let r = mem.find_region(addr).unwrap();
1328         assert_eq!(r.is_hugetlbfs(), None);
1329     }
1330 }
1331