// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::convert::{AsRef, TryFrom};
use std::fs::File;
use std::marker::{Send, Sync};
use std::mem::size_of;
use std::result;
use std::sync::Arc;

use base::{pagesize, Error as SysError};
use base::{
    AsRawDescriptor, AsRawDescriptors, MappedRegion, MemfdSeals, MemoryMapping,
    MemoryMappingBuilder, MemoryMappingUnix, MmapError, RawDescriptor, SharedMemory,
    SharedMemoryUnix,
};
use bitflags::bitflags;
use cros_async::{mem, BackingMemory};
use data_model::volatile_memory::*;
use data_model::DataInit;
use remain::sorted;
use thiserror::Error;

use crate::guest_address::GuestAddress;

#[sorted]
#[derive(Error, Debug)]
pub enum Error {
    #[error("invalid guest address {0}")]
    InvalidGuestAddress(GuestAddress),
    #[error("invalid offset {0}")]
    InvalidOffset(u64),
    #[error("size {0} must not be zero")]
    InvalidSize(usize),
    #[error("invalid guest memory access at addr={0}: {1}")]
    MemoryAccess(GuestAddress, MmapError),
    #[error("failed to set seals on shm region: {0}")]
    MemoryAddSealsFailed(SysError),
    #[error("failed to create shm region")]
    MemoryCreationFailed(SysError),
    #[error("failed to map guest memory: {0}")]
    MemoryMappingFailed(MmapError),
    #[error("shm regions must be page aligned")]
    MemoryNotAligned,
    #[error("memory regions overlap")]
    MemoryRegionOverlap,
    #[error("memory region size {0} is too large")]
    MemoryRegionTooLarge(u128),
    #[error("incomplete read of {completed} instead of {expected} bytes")]
    ShortRead { expected: usize, completed: usize },
    #[error("incomplete write of {completed} instead of {expected} bytes")]
    ShortWrite { expected: usize, completed: usize },
    #[error("DescriptorChain split is out of bounds: {0}")]
    SplitOutOfBounds(usize),
    #[error("{0}")]
    VolatileMemoryAccess(VolatileMemoryError),
}

pub type Result<T> = result::Result<T, Error>;

bitflags! {
    pub struct MemoryPolicy: u32 {
        const USE_HUGEPAGES = 1;
    }
}

/// A file-like object backing `MemoryRegion`.
#[derive(Clone)]
pub enum BackingObject {
    Shm(Arc<SharedMemory>),
    File(Arc<File>),
}

impl AsRawDescriptor for BackingObject {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        match self {
            Self::Shm(shm) => shm.as_raw_descriptor(),
            Self::File(f) => f.as_raw_descriptor(),
        }
    }
}

impl AsRef<dyn AsRawDescriptor + Sync + Send> for BackingObject {
    fn as_ref(&self) -> &(dyn AsRawDescriptor + Sync + Send + 'static) {
        match self {
            BackingObject::Shm(shm) => shm.as_ref(),
            BackingObject::File(f) => f.as_ref(),
        }
    }
}

/// A region of memory-mapped memory.
/// Holds the memory mapping with its offset in guest memory.
/// Also holds the backing object for the mapping and the offset in that object of the mapping.
pub struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,

    shared_obj: BackingObject,
    obj_offset: u64,
}

impl MemoryRegion {
    /// Creates a new MemoryRegion using the given SharedMemory object, to be attached later to a
    /// VM at the `guest_base` address in the guest.
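    ///
    /// # Example
    ///
    /// A minimal sketch (assumes `MemoryRegion` is re-exported at the crate root like
    /// `GuestMemory`; the shm name and sizes are illustrative):
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::{SharedMemory, SharedMemoryUnix};
    /// # use vm_memory::{GuestAddress, MemoryRegion};
    /// let shm = Arc::new(SharedMemory::named("test", 0x1000).unwrap());
    /// let region = MemoryRegion::new_from_shm(0x1000, GuestAddress(0x10000), 0, shm).unwrap();
    /// ```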
    pub fn new_from_shm(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        shm: Arc<SharedMemory>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_shared_memory(shm.as_ref())
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::Shm(shm),
            obj_offset: offset,
        })
    }

    /// Creates a new MemoryRegion using the given file, to be made available later at the
    /// `guest_base` address in the guest.
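    ///
    /// # Example
    ///
    /// A sketch using `/dev/zero` as the backing file (an assumption for illustration; any file
    /// opened read/write that the host can map shared would do, and `MemoryRegion` is assumed to
    /// be re-exported at the crate root):
    ///
    /// ```
    /// # use std::fs::OpenOptions;
    /// # use std::sync::Arc;
    /// # use vm_memory::{GuestAddress, MemoryRegion};
    /// let file = Arc::new(
    ///     OpenOptions::new()
    ///         .read(true)
    ///         .write(true)
    ///         .open("/dev/zero")
    ///         .unwrap(),
    /// );
    /// let region = MemoryRegion::new_from_file(0x1000, GuestAddress(0x10000), 0, file).unwrap();
    /// ```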
    pub fn new_from_file(
        size: u64,
        guest_base: GuestAddress,
        offset: u64,
        file: Arc<File>,
    ) -> Result<Self> {
        let mapping = MemoryMappingBuilder::new(size as usize)
            .from_file(&file)
            .offset(offset)
            .build()
            .map_err(Error::MemoryMappingFailed)?;
        Ok(MemoryRegion {
            mapping,
            guest_base,
            shared_obj: BackingObject::File(file),
            obj_offset: offset,
        })
    }

    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }
}

/// Tracks memory regions and where they are mapped in the guest, along with shm
/// fds of the underlying memory regions.
#[derive(Clone)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
}

impl AsRawDescriptors for GuestMemory {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shared_obj.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
    /// Creates backing shm for GuestMemory regions
    fn create_shm(ranges: &[(GuestAddress, u64)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned);
            }

            aligned_size += range.1;
        }

        let mut seals = MemfdSeals::new();

        seals.set_shrink_seal();
        seals.set_grow_seal();
        seals.set_seal_seal();

        let mut shm = SharedMemory::named("crosvm_guest", aligned_size)
            .map_err(Error::MemoryCreationFailed)?;
        shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)?;

        Ok(shm)
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        // Create shm

        let shm = Arc::new(GuestMemory::create_shm(ranges)?);
        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size = usize::try_from(range.1)
                .map_err(|_| Error::MemoryRegionTooLarge(range.1 as u128))?;
            let mapping = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm.as_ref())
                .offset(offset)
                .build()
                .map_err(Error::MemoryMappingFailed)?;
            regions.push(MemoryRegion {
                mapping,
                guest_base: range.0,
                shared_obj: BackingObject::Shm(shm.clone()),
                obj_offset: offset,
            });

            offset += size as u64;
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Creates a `GuestMemory` from a collection of MemoryRegions.
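    ///
    /// # Example
    ///
    /// A minimal sketch combining two regions carved out of one shm (assumes `MemoryRegion` is
    /// re-exported at the crate root like `GuestMemory`; names and sizes are illustrative):
    ///
    /// ```
    /// # use std::sync::Arc;
    /// # use base::{SharedMemory, SharedMemoryUnix};
    /// # use vm_memory::{GuestAddress, GuestMemory, MemoryRegion};
    /// let shm = Arc::new(SharedMemory::named("test", 0x2000).unwrap());
    /// let r1 = MemoryRegion::new_from_shm(0x1000, GuestAddress(0x0), 0, shm.clone()).unwrap();
    /// let r2 = MemoryRegion::new_from_shm(0x1000, GuestAddress(0x4000), 0x1000, shm).unwrap();
    /// let gm = GuestMemory::from_regions(vec![r1, r2]).unwrap();
    /// assert_eq!(gm.num_regions(), 2);
    /// ```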
    pub fn from_regions(mut regions: Vec<MemoryRegion>) -> Result<Self> {
        // Sort the regions and ensure non overlap.
        regions.sort_by(|a, b| a.guest_base.cmp(&b.guest_base));

        if regions.len() > 1 {
            let mut prev_end = regions[0]
                .guest_base
                .checked_add(regions[0].mapping.size() as u64)
                .ok_or(Error::MemoryRegionOverlap)?;
            for region in &regions[1..] {
                if prev_end > region.guest_base {
                    return Err(Error::MemoryRegionOverlap);
                }
                prev_end = region
                    .guest_base
                    .checked_add(region.mapping.size() as u64)
                    .ok_or(Error::MemoryRegionTooLarge(
                        region.guest_base.0 as u128 + region.mapping.size() as u128,
                    ))?;
            }
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    /// Returns the total size of memory in bytes.
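    ///
    /// # Example
    ///
    /// A minimal sketch (sizes are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert_eq!(gm.memory_size(), 0x400);
    /// ```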
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
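    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert!(gm.address_in_range(GuestAddress(0x1200)));
    /// assert!(!gm.address_in_range(GuestAddress(0x1400)));
    /// ```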
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    /// Returns true if the given range (start, end) overlaps with the memory range
    /// available to the guest.
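    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert!(gm.range_overlap(GuestAddress(0x1200), GuestAddress(0x1600)));
    /// assert!(!gm.range_overlap(GuestAddress(0x1400), GuestAddress(0x1800)));
    /// ```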
    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    /// Returns an address `addr + offset` if it's in range.
    ///
    /// This function doesn't care whether a region `[addr, addr + offset)` is in range or not. To
    /// guarantee it's a valid range, use `is_valid_range()` instead.
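    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert_eq!(
    ///     gm.checked_offset(GuestAddress(0x1000), 0x200),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x400), None);
    /// ```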
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    /// Returns true if the given range `[start, start + length)` is a valid contiguous memory
    /// range available to the guest and it's backed by a single underlying memory region.
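    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// assert!(gm.is_valid_range(GuestAddress(0x1000), 0x400));
    /// assert!(!gm.is_valid_range(GuestAddress(0x1000), 0x401));
    /// ```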
    pub fn is_valid_range(&self, start: GuestAddress, length: u64) -> bool {
        if length == 0 {
            return false;
        }

        let end = if let Some(end) = start.checked_add(length - 1) {
            end
        } else {
            return false;
        };

        self.regions
            .iter()
            .any(|region| region.start() <= start && end < region.end())
    }

    /// Returns the number of memory regions.
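    ///
    /// # Example
    ///
    /// A minimal sketch (addresses are illustrative):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x1000), (GuestAddress(0x4000), 0x1000)])
    ///     .unwrap();
    /// assert_eq!(gm.num_regions(), 2);
    /// ```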
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    /// Madvise away the address range in the host that is associated with the given guest range.
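    ///
    /// # Example
    ///
    /// A minimal sketch that drops the backing pages for a whole page-sized region (what the
    /// pages read back as afterwards depends on the madvise semantics of the backing):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).unwrap();
    /// gm.remove_range(GuestAddress(0x1000), 0x1000).unwrap();
    /// ```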
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        self.do_in_region(addr, move |mapping, offset, _| {
            mapping
                .remove_range(offset, count as usize)
                .map_err(|e| Error::MemoryAccess(addr, e))
        })
    }

    /// Handles guest memory policy hints/advice.
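    ///
    /// # Example
    ///
    /// A minimal sketch (assumes `MemoryPolicy` is re-exported at the crate root like
    /// `GuestMemory`):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory, MemoryPolicy};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).unwrap();
    /// gm.set_memory_policy(MemoryPolicy::USE_HUGEPAGES);
    /// ```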
    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
        if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
            for region in self.regions.iter() {
                if let Err(err) = region.mapping.use_hugepages() {
                    println!("Failed to enable HUGEPAGE for mapping: {}", err);
                }
            }
        }
    }

    /// Perform the specified action on each region's addresses.
    ///
    /// Callback is called with arguments:
    ///  * index: usize
    ///  * guest_addr: GuestAddress
    ///  * size: usize
    ///  * host_addr: usize
    ///  * shm: Descriptor of the backing memory region
    ///  * shm_offset: u64
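    ///
    /// # Example
    ///
    /// A minimal sketch that sums the sizes of all regions (the `()` error type is arbitrary):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let mut total = 0;
    /// gm.with_regions::<_, ()>(|_index, _guest_addr, size, _host_addr, _obj, _obj_offset| {
    ///     total += size;
    ///     Ok(())
    /// })
    /// .unwrap();
    /// assert_eq!(total, 0x400);
    /// ```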
    pub fn with_regions<F, E>(&self, mut cb: F) -> result::Result<(), E>
    where
        F: FnMut(usize, GuestAddress, usize, usize, &BackingObject, u64) -> result::Result<(), E>,
    {
        for (index, region) in self.regions.iter().enumerate() {
            cb(
                index,
                region.start(),
                region.mapping.size(),
                region.mapping.as_ptr() as usize,
                &region.shared_obj,
                region.obj_offset,
            )?;
        }
        Ok(())
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_at_addr(&[1,2,3,4,5], GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(5, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads to a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let buf = &mut [0u8; 16];
    ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(16, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .read_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400+32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: DataInit>(&self, guest_addr: GuestAddress) -> Result<T> {
        self.do_in_region(guest_addr, |mapping, offset, _| {
            mapping
                .read_obj(offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: DataInit>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_obj(val, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
    /// is not a subset of this `GuestMemory`.
    ///
    /// # Examples
    /// * Write `99` to 30 bytes starting at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
    ///     vslice.write_bytes(99);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                // The cast to a usize is safe here because we know that `region.contains(addr)` and
                // it's not possible for a memory region to be larger than what fits in a usize.
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    /// Returns a `VolatileRef` to an object at `addr` if the object fits, or an Err if it extends
    /// past the end.
    ///
    /// # Examples
    /// * Get a &u64 at offset 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_ref_u64() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     gm.write_obj_at_addr(47u64, GuestAddress(0x1010))?;
    ///     let vref = gm.get_ref_at_addr::<u64>(GuestAddress(0x1010))?;
    ///     assert_eq!(vref.load(), 47u64);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_ref_at_addr<T: DataInit>(&self, addr: GuestAddress) -> Result<VolatileRef<T>> {
        let buf = self.get_slice_at_addr(addr, size_of::<T>())?;
        // Safe because we know that `buf` is at least `size_of::<T>()` bytes and that the
        // returned reference will not outlive this `GuestMemory`.
        Ok(unsafe { VolatileRef::new(buf.as_mut_ptr() as *mut T) })
    }

    /// Reads data from a file descriptor and writes it to guest memory.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
    ///       let read_addr = addr.checked_add(8).ok_or(())?;
    ///       let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
    /// #     Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory(
        &self,
        guest_addr: GuestAddress,
        src: &dyn AsRawDescriptor,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .read_to_memory(offset, src, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes data from memory to a file descriptor.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Write `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// #     Ok(())
    /// # }
    /// ```
    pub fn write_from_memory(
        &self,
        guest_addr: GuestAddress,
        dst: &dyn AsRawDescriptor,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_from_memory(offset, dst, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_from_memory`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        self.do_in_region(guest_addr, |mapping, offset, _| {
            // This is safe; `do_in_region` already checks that offset is in
            // bounds.
            Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
        })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process, and verify that the provided size defines a valid range within
    /// a single memory region. Similar to get_host_address(), this should only
    /// be used for giving addresses to the kernel.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    /// * `size` - Size of the address range to be converted.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr_range() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address_range(GuestAddress(0x1200), 0x200).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address_range(
        &self,
        guest_addr: GuestAddress,
        size: usize,
    ) -> Result<*const u8> {
        if size == 0 {
            return Err(Error::InvalidSize(size));
        }

        // Assume no overlap among regions
        self.do_in_region(guest_addr, |mapping, offset, _| {
            if mapping
                .size()
                .checked_sub(offset)
                .map_or(true, |v| v < size)
            {
                return Err(Error::InvalidGuestAddress(guest_addr));
            }

            // This is safe; `do_in_region` already checks that offset is in
            // bounds.
            Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
        })
    }

    /// Returns a reference to the backing object of the region that contains the given address.
    pub fn shm_region(
        &self,
        guest_addr: GuestAddress,
    ) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shared_obj.as_ref())
    }

    /// Returns the region that contains the memory at `offset` from the base of guest memory.
    pub fn offset_region(&self, offset: u64) -> Result<&(dyn AsRawDescriptor + Send + Sync)> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

    /// Loops over all guest memory regions of `self`, and performs the callback function `F` in
    /// the target region that contains `guest_addr`.  The callback function `F` takes in:
    ///
    /// (i) the memory mapping associated with the target region.
    /// (ii) the relative offset from the start of the target region to `guest_addr`.
    /// (iii) the offset of the target region within its backing object.
    ///
    /// If no target region is found, an error is returned.  The callback function `F` may return
    /// an Ok(`T`) on success or a `GuestMemoryError` on failure.
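    ///
    /// # Example
    ///
    /// A minimal sketch that returns the mapping size of the region containing an address
    /// (assumes the `MappedRegion` trait from `base` supplies `size()`, as it does elsewhere in
    /// this file):
    ///
    /// ```
    /// # use base::MappedRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x400)]).unwrap();
    /// let size = gm
    ///     .do_in_region(GuestAddress(0x1200), |mapping, offset, _| {
    ///         assert_eq!(offset, 0x200);
    ///         Ok(mapping.size())
    ///     })
    ///     .unwrap();
    /// assert_eq!(size, 0x400);
    /// ```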
    pub fn do_in_region<F, T>(&self, guest_addr: GuestAddress, cb: F) -> Result<T>
    where
        F: FnOnce(&MemoryMapping, usize, u64) -> Result<T>,
    {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .and_then(|region| {
                cb(
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.obj_offset,
                )
            })
    }

    /// Convert a GuestAddress into an offset within the associated shm region.
    ///
    /// Due to potential gaps within GuestMemory, it is helpful to know the
    /// offset within the shm where a given address is found. This offset
    /// can then be passed to another process mapping the shm to read data
    /// starting at that address.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let addr_a = GuestAddress(0x1000);
    /// let addr_b = GuestAddress(0x8000);
    /// let mut gm = GuestMemory::new(&vec![
    ///     (addr_a, 0x2000),
    ///     (addr_b, 0x3000)]).expect("failed to create GuestMemory");
    /// let offset = gm.offset_from_base(GuestAddress(0x9500))
    ///                .expect("failed to get offset");
    /// assert_eq!(offset, 0x3500);
    /// ```
    pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.obj_offset + guest_addr.offset_from(region.start()))
    }
}

// It is safe to implement BackingMemory because GuestMemory can already be mutated at any time.
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset as u64), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}


#[cfg(test)]
mod tests {
    use super::*;
    use base::kernel_has_memfd;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x4000);
        assert!(GuestMemory::new(&[(start_addr1, 0x4000), (start_addr2, 0x4000)]).is_ok());
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        assert!(GuestMemory::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x4000);
        let gm = GuestMemory::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]).unwrap();
        assert!(gm.address_in_range(GuestAddress(0x1000)));
        assert!(!gm.address_in_range(GuestAddress(0x3000)));
        assert!(gm.address_in_range(GuestAddress(0x5000)));
        assert!(!gm.address_in_range(GuestAddress(0x6000)));
        assert!(gm.range_overlap(GuestAddress(0x1000), GuestAddress(0x3000)));
        assert!(!gm.range_overlap(GuestAddress(0x3000), GuestAddress(0x4000)));
        assert!(gm.range_overlap(GuestAddress(0x3000), GuestAddress(0x7000)));
        assert!(gm.checked_offset(GuestAddress(0x1000), 0x1000).is_none());
        assert!(gm.checked_offset(GuestAddress(0x5000), 0x800).is_some());
        assert!(gm.checked_offset(GuestAddress(0x5000), 0x1000).is_none());
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_load_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().load();
        let num2: u64 = gm
            .get_ref_at_addr(GuestAddress(0x1000 + 32))
            .unwrap()
            .load();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_store_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().store(val1);
        gm.get_ref_at_addr(GuestAddress(0x1000 + 32))
            .unwrap()
            .store(val2);
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        mem.do_in_region(addr, |mapping, _, _| Ok(mapping.as_ptr() as *const u8))
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let mem = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x4000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn guest_to_host_range() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let mem = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x4000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address_range(start_addr1, 0x1000).unwrap();
        let host_addr2 = mem.get_host_address_range(start_addr2, 0x1000).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        let host_addr3 = mem.get_host_address_range(start_addr2, 0x2000).unwrap();
        assert_eq!(host_addr3, addr2_base);

        // Check that a valid guest address with an invalid size returns an error.
        assert!(mem.get_host_address_range(start_addr1, 0x2000).is_err());

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address_range(bad_addr, 0x1000).is_err());
    }

    #[test]
    fn shm_offset() {
        if !kernel_has_memfd() {
            return;
        }

        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        let _ = gm.with_regions::<_, ()>(|index, _, size, _, obj, offset| {
            let shm = match obj {
                BackingObject::Shm(s) => s,
                _ => {
                    panic!("backing object isn't SharedMemory");
                }
            };
            let mmap = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm)
                .offset(offset)
                .build()
                .unwrap();

            if index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }

            Ok(())
        });
    }
}