// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::convert::AsRef;
use std::convert::TryFrom;
use std::fmt::{self, Display};
use std::mem::size_of;
use std::result;
use std::sync::Arc;

use crate::guest_address::GuestAddress;
use base::{pagesize, Error as SysError};
use base::{
    AsRawDescriptor, AsRawDescriptors, MappedRegion, MemfdSeals, MemoryMapping,
    MemoryMappingBuilder, MemoryMappingUnix, MmapError, RawDescriptor, SharedMemory,
    SharedMemoryUnix,
};
use cros_async::{mem, BackingMemory};
use data_model::volatile_memory::*;
use data_model::DataInit;

use bitflags::bitflags;

#[derive(Debug)]
pub enum Error {
    DescriptorChainOverflow,
    InvalidGuestAddress(GuestAddress),
    InvalidOffset(u64),
    MemoryAccess(GuestAddress, MmapError),
    MemoryMappingFailed(MmapError),
    MemoryRegionOverlap,
    MemoryRegionTooLarge(u64),
    MemoryNotAligned,
    MemoryCreationFailed(SysError),
    MemoryAddSealsFailed(SysError),
    ShortWrite { expected: usize, completed: usize },
    ShortRead { expected: usize, completed: usize },
    SplitOutOfBounds(usize),
    VolatileMemoryAccess(VolatileMemoryError),
}
pub type Result<T> = result::Result<T, Error>;

impl std::error::Error for Error {}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            DescriptorChainOverflow => write!(
                f,
                "the combined length of all the buffers in a DescriptorChain is too large"
            ),
            InvalidGuestAddress(addr) => write!(f, "invalid guest address {}", addr),
            InvalidOffset(addr) => write!(f, "invalid offset {}", addr),
            MemoryAccess(addr, e) => {
                write!(f, "invalid guest memory access at addr={}: {}", addr, e)
            }
            MemoryMappingFailed(e) => write!(f, "failed to map guest memory: {}", e),
            MemoryRegionOverlap => write!(f, "memory regions overlap"),
            MemoryRegionTooLarge(size) => write!(f, "memory region size {} is too large", size),
            MemoryNotAligned => write!(f, "shm regions must be page aligned"),
            MemoryCreationFailed(_) => write!(f, "failed to create shm region"),
            MemoryAddSealsFailed(e) => write!(f, "failed to set seals on shm region: {}", e),
            ShortWrite {
                expected,
                completed,
            } => write!(
                f,
                "incomplete write of {} instead of {} bytes",
                completed, expected,
            ),
            ShortRead {
                expected,
                completed,
            } => write!(
                f,
                "incomplete read of {} instead of {} bytes",
                completed, expected,
            ),
            SplitOutOfBounds(off) => write!(f, "DescriptorChain split is out of bounds: {}", off),
            VolatileMemoryAccess(e) => e.fmt(f),
        }
    }
}

bitflags! {
    pub struct MemoryPolicy: u32 {
        const USE_HUGEPAGES = 1;
    }
}

struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,
    shm_offset: u64,
    shm: Arc<SharedMemory>,
}

impl MemoryRegion {
    fn start(&self) -> GuestAddress {
        self.guest_base
    }

    fn end(&self) -> GuestAddress {
        // unchecked_add is safe as the region bounds were checked when it was created.
        self.guest_base.unchecked_add(self.mapping.size() as u64)
    }

    fn contains(&self, addr: GuestAddress) -> bool {
        addr >= self.guest_base && addr < self.end()
    }
}

/// Tracks a memory region and where it is mapped in the guest, along with a shm
/// fd of the underlying memory regions.
#[derive(Clone)]
pub struct GuestMemory {
    regions: Arc<[MemoryRegion]>,
}

impl AsRawDescriptors for GuestMemory {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        self.regions
            .iter()
            .map(|r| r.shm.as_raw_descriptor())
            .collect()
    }
}

impl GuestMemory {
    /// Creates backing shm for GuestMemory regions
    fn create_shm(ranges: &[(GuestAddress, u64)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned);
            }

            aligned_size += range.1;
        }

        let mut seals = MemfdSeals::new();

        seals.set_shrink_seal();
        seals.set_grow_seal();
        seals.set_seal_seal();

        let mut shm = SharedMemory::named("crosvm_guest", aligned_size)
            .map_err(Error::MemoryCreationFailed)?;
        shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)?;

        Ok(shm)
    }
    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a slice of (Address, Size) tuples sorted by Address.
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        // Create shm
        let shm = Arc::new(GuestMemory::create_shm(ranges)?);

        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let size =
                usize::try_from(range.1).map_err(|_| Error::MemoryRegionTooLarge(range.1))?;
            let mapping = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm.as_ref())
                .offset(offset)
                .build()
                .map_err(Error::MemoryMappingFailed)?;
            regions.push(MemoryRegion {
                mapping,
                guest_base: range.0,
                shm_offset: offset,
                shm: Arc::clone(&shm),
            });

            offset += size as u64;
        }

        Ok(GuestMemory {
            regions: Arc::from(regions),
        })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_end_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     assert_eq!(start_addr.checked_add(0x400), Some(gm.end_addr()));
    ///     Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.start())
            .map_or(GuestAddress(0), MemoryRegion::end)
    }

    /// Returns the total size of memory in bytes.
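    ///
    /// # Examples
    ///
    /// A minimal sketch in the style of the other doctests in this file; the
    /// single illustrative region makes the total size easy to predict.
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_memory_size() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     assert_eq!(gm.memory_size(), 0x1000);
    ///     Ok(())
    /// # }
    /// ```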
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
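    ///
    /// # Examples
    ///
    /// A minimal sketch with one illustrative region covering [0x1000, 0x2000):
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_address_in_range() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     assert!(gm.address_in_range(GuestAddress(0x1500)));
    ///     assert!(!gm.address_in_range(GuestAddress(0x3000)));
    ///     Ok(())
    /// # }
    /// ```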
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        self.regions.iter().any(|region| region.contains(addr))
    }

    /// Returns true if the given range (start, end) overlaps with the memory
    /// range available to the guest.
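    ///
    /// # Examples
    ///
    /// A minimal sketch; the region and the probed ranges are illustrative:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_range_overlap() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     assert!(gm.range_overlap(GuestAddress(0x800), GuestAddress(0x1800)));
    ///     assert!(!gm.range_overlap(GuestAddress(0x2000), GuestAddress(0x3000)));
    ///     Ok(())
    /// # }
    /// ```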
    pub fn range_overlap(&self, start: GuestAddress, end: GuestAddress) -> bool {
        self.regions
            .iter()
            .any(|region| region.start() < end && start < region.end())
    }

    /// Returns the address plus the offset if it is in range.
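    ///
    /// # Examples
    ///
    /// A minimal sketch; note that an offset landing exactly on the end of the
    /// region is out of range:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_checked_offset() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     assert_eq!(
    ///         gm.checked_offset(GuestAddress(0x1000), 0x800),
    ///         Some(GuestAddress(0x1800))
    ///     );
    ///     assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x1000), None);
    ///     Ok(())
    /// # }
    /// ```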
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset).and_then(|a| {
            if self.address_in_range(a) {
                Some(a)
            } else {
                None
            }
        })
    }

    /// Returns the number of memory regions.
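    ///
    /// # Examples
    ///
    /// A minimal sketch with two illustrative regions:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_num_regions() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x0), 0x1000), (GuestAddress(0x4000), 0x1000)])
    ///         .map_err(|_| ())?;
    ///     assert_eq!(gm.num_regions(), 2);
    ///     Ok(())
    /// # }
    /// ```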
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    /// Madvise away the address range in the host that is associated with the given guest range.
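    ///
    /// # Examples
    ///
    /// A minimal sketch that gives the whole of an illustrative region back to
    /// the host; the guest range must stay within a single region:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_remove_range() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     gm.remove_range(GuestAddress(0x1000), 0x1000).map_err(|_| ())
    /// # }
    /// ```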
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        self.do_in_region(addr, move |mapping, offset, _| {
            mapping
                .remove_range(offset, count as usize)
                .map_err(|e| Error::MemoryAccess(addr, e))
        })
    }

    /// Handles guest memory policy hints/advice.
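    ///
    /// # Examples
    ///
    /// A minimal sketch that asks for transparent hugepages on every region;
    /// this assumes `MemoryPolicy` is re-exported at the crate root alongside
    /// `GuestMemory`:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory, MemoryPolicy};
    /// # fn test_memory_policy() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     gm.set_memory_policy(MemoryPolicy::USE_HUGEPAGES);
    ///     Ok(())
    /// # }
    /// ```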
    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
        if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
            for region in self.regions.iter() {
                if let Err(err) = region.mapping.use_hugepages() {
                    println!("Failed to enable HUGEPAGE for mapping {}", err);
                }
            }
        }
    }

    /// Perform the specified action on each region's addresses.
    ///
    /// Callback is called with arguments:
    ///  * index: usize
    ///  * guest_addr: GuestAddress
    ///  * size: usize
    ///  * host_addr: usize
    ///  * shm: SharedMemory backing for the given region
    ///  * shm_offset: u64
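    ///
    /// # Examples
    ///
    /// A minimal sketch that just prints each region; the error type is fixed
    /// to `()` since the callback is infallible here:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_with_regions() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     gm.with_regions::<_, ()>(|index, guest_addr, size, _host_addr, _shm, shm_offset| {
    ///         println!("{}: {} size {:#x} shm offset {:#x}", index, guest_addr, size, shm_offset);
    ///         Ok(())
    ///     })
    /// # }
    /// ```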
    pub fn with_regions<F, E>(&self, mut cb: F) -> result::Result<(), E>
    where
        F: FnMut(usize, GuestAddress, usize, usize, &SharedMemory, u64) -> result::Result<(), E>,
    {
        for (index, region) in self.regions.iter().enumerate() {
            cb(
                index,
                region.start(),
                region.mapping.size(),
                region.mapping.as_ptr() as usize,
                region.shm.as_ref(),
                region.shm_offset,
            )?;
        }
        Ok(())
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written.  The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let res = gm.write_at_addr(&[1, 2, 3, 4, 5], GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(5, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads to a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read.  The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_slice() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     let buf = &mut [0u8; 16];
    ///     let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
    ///     assert_eq!(16, res);
    ///     Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .read_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use vm_memory::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x400)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read.  However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// #     let start_addr1 = GuestAddress(0x0);
    /// #     let start_addr2 = GuestAddress(0x400);
    /// #     let mut gm = GuestMemory::new(&vec![(start_addr1, 0x400), (start_addr2, 0x400)])
    /// #         .map_err(|_| ())?;
    ///       let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    ///       let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x400 + 32)).map_err(|_| ())?;
    /// #     Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: DataInit>(&self, guest_addr: GuestAddress) -> Result<T> {
        self.do_in_region(guest_addr, |mapping, offset, _| {
            mapping
                .read_obj(offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///     gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///         .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: DataInit>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_obj(val, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Returns a `VolatileSlice` of `len` bytes starting at `addr`. Returns an error if the slice
    /// is not a subset of this `GuestMemory`.
    ///
    /// # Examples
    /// * Write `99` to 30 bytes starting at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_volatile_slice() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     let vslice = gm.get_slice_at_addr(GuestAddress(0x1010), 30)?;
    ///     vslice.write_bytes(99);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_slice_at_addr(&self, addr: GuestAddress, len: usize) -> Result<VolatileSlice> {
        self.regions
            .iter()
            .find(|region| region.contains(addr))
            .ok_or(Error::InvalidGuestAddress(addr))
            .and_then(|region| {
                // The cast to a usize is safe here because we know that `region.contains(addr)` and
                // it's not possible for a memory region to be larger than what fits in a usize.
                region
                    .mapping
                    .get_slice(addr.offset_from(region.start()) as usize, len)
                    .map_err(Error::VolatileMemoryAccess)
            })
    }

    /// Returns a `VolatileRef` to an object at `addr`. Returns an error if the
    /// object extends past the end of the region containing `addr`.
    ///
    /// # Examples
    /// * Get a `VolatileRef` to a u64 at guest address 0x1010.
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory, GuestMemoryError};
    /// # fn test_ref_u64() -> Result<(), GuestMemoryError> {
    /// #   let start_addr = GuestAddress(0x1000);
    /// #   let mut gm = GuestMemory::new(&vec![(start_addr, 0x400)])?;
    ///     gm.write_obj_at_addr(47u64, GuestAddress(0x1010))?;
    ///     let vref = gm.get_ref_at_addr::<u64>(GuestAddress(0x1010))?;
    ///     assert_eq!(vref.load(), 47u64);
    /// #   Ok(())
    /// # }
    /// ```
    pub fn get_ref_at_addr<T: DataInit>(&self, addr: GuestAddress) -> Result<VolatileRef<T>> {
        let buf = self.get_slice_at_addr(addr, size_of::<T>())?;
        // Safe because we know that `buf` is at least `size_of::<T>()` bytes and that the
        // returned reference will not outlive this `GuestMemory`.
        Ok(unsafe { VolatileRef::new(buf.as_mut_ptr() as *mut T) })
    }

    /// Reads data from a file descriptor and writes it to guest memory.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin writing memory at this offset.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
    ///       let read_addr = addr.checked_add(8).ok_or(())?;
    ///       let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
    /// #     Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory(
        &self,
        guest_addr: GuestAddress,
        src: &dyn AsRawDescriptor,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .read_to_memory(offset, src, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes data from memory to a file descriptor.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin reading memory from this offset.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Write `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use base::MemoryMapping;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// #     let start_addr = GuestAddress(0x1000);
    /// #     let gm = GuestMemory::new(&vec![(start_addr, 0x400)]).map_err(|_| ())?;
    ///       let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    ///       let addr = GuestAddress(0x1010);
    ///       gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// #     Ok(())
    /// # }
    /// ```
    pub fn write_from_memory(
        &self,
        guest_addr: GuestAddress,
        dst: &dyn AsRawDescriptor,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset, _| {
            mapping
                .write_from_memory(offset, dst, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_from_memory`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    ///     let start_addr = GuestAddress(0x1000);
    ///     let mut gm = GuestMemory::new(&vec![(start_addr, 0x500)]).map_err(|_| ())?;
    ///     let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    ///     println!("Host address is {:p}", addr);
    ///     Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        self.do_in_region(guest_addr, |mapping, offset, _| {
            // This is safe; `do_in_region` already checks that offset is in
            // bounds.
            Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
        })
    }

    /// Returns a reference to the SharedMemory region that backs the given address.
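    ///
    /// # Examples
    ///
    /// A minimal sketch; with a single illustrative region, every in-range
    /// address maps to the same backing shm:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_shm_region() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     let _shm = gm.shm_region(GuestAddress(0x1800)).map_err(|_| ())?;
    ///     Ok(())
    /// # }
    /// ```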
    pub fn shm_region(&self, guest_addr: GuestAddress) -> Result<&SharedMemory> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shm.as_ref())
    }

    /// Returns the backing SharedMemory region that contains the memory at `offset` from the base
    /// of guest memory.
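    ///
    /// # Examples
    ///
    /// A minimal sketch; offset 0x800 from the base of guest memory lands in
    /// the first (and only) illustrative region:
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_offset_region() -> Result<(), ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     let _shm = gm.offset_region(0x800).map_err(|_| ())?;
    ///     Ok(())
    /// # }
    /// ```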
    pub fn offset_region(&self, offset: u64) -> Result<&SharedMemory> {
        self.shm_region(
            self.checked_offset(self.regions[0].guest_base, offset)
                .ok_or(Error::InvalidOffset(offset))?,
        )
    }

    /// Loops over all guest memory regions of `self`, and performs the callback function `F` in
    /// the target region that contains `guest_addr`.  The callback function `F` takes in:
    ///
    /// (i) the memory mapping associated with the target region.
    /// (ii) the relative offset from the start of the target region to `guest_addr`.
    /// (iii) the offset from the start of the backing shm to the start of the target region.
    ///
    /// If no target region is found, an error is returned.  The callback function `F` may return
    /// an Ok(`T`) on success or a `GuestMemoryError` on failure.
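    ///
    /// # Examples
    ///
    /// A minimal sketch that reports how many bytes remain in the region past
    /// an illustrative guest address:
    ///
    /// ```
    /// # use base::MappedRegion;
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// # fn test_do_in_region() -> Result<usize, ()> {
    ///     let gm = GuestMemory::new(&[(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    ///     gm.do_in_region(GuestAddress(0x1800), |mapping, offset, _shm_offset| {
    ///         Ok(mapping.size() - offset)
    ///     })
    ///     .map_err(|_| ())
    /// # }
    /// ```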
    pub fn do_in_region<F, T>(&self, guest_addr: GuestAddress, cb: F) -> Result<T>
    where
        F: FnOnce(&MemoryMapping, usize, u64) -> Result<T>,
    {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .and_then(|region| {
                cb(
                    &region.mapping,
                    guest_addr.offset_from(region.start()) as usize,
                    region.shm_offset,
                )
            })
    }

    /// Convert a GuestAddress into an offset within the associated shm region.
    ///
    /// Due to potential gaps within GuestMemory, it is helpful to know the
    /// offset within the shm where a given address is found. This offset
    /// can then be passed to another process mapping the shm to read data
    /// starting at that address.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use vm_memory::{GuestAddress, GuestMemory};
    /// let addr_a = GuestAddress(0x1000);
    /// let addr_b = GuestAddress(0x8000);
    /// let mut gm = GuestMemory::new(&vec![
    ///     (addr_a, 0x2000),
    ///     (addr_b, 0x3000)]).expect("failed to create GuestMemory");
    /// let offset = gm.offset_from_base(GuestAddress(0x9500))
    ///                .expect("failed to get offset");
    /// assert_eq!(offset, 0x3500);
    /// ```
    pub fn offset_from_base(&self, guest_addr: GuestAddress) -> Result<u64> {
        self.regions
            .iter()
            .find(|region| region.contains(guest_addr))
            .ok_or(Error::InvalidGuestAddress(guest_addr))
            .map(|region| region.shm_offset + guest_addr.offset_from(region.start()))
    }
}

// It is safe to implement BackingMemory because GuestMemory can already be
// mutated at any time.
unsafe impl BackingMemory for GuestMemory {
    fn get_volatile_slice(
        &self,
        mem_range: cros_async::MemRegion,
    ) -> mem::Result<VolatileSlice<'_>> {
        self.get_slice_at_addr(GuestAddress(mem_range.offset as u64), mem_range.len)
            .map_err(|_| mem::Error::InvalidOffset(mem_range.offset, mem_range.len))
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use base::kernel_has_memfd;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);

        assert!(GuestMemory::new(&[(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x4000);
        assert!(GuestMemory::new(&[(start_addr1, 0x4000), (start_addr2, 0x4000)]).is_ok());
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        assert!(GuestMemory::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]).is_err());
    }

    #[test]
    fn region_hole() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x4000);
        let gm = GuestMemory::new(&[(start_addr1, 0x2000), (start_addr2, 0x2000)]).unwrap();
        assert_eq!(gm.address_in_range(GuestAddress(0x1000)), true);
        assert_eq!(gm.address_in_range(GuestAddress(0x3000)), false);
        assert_eq!(gm.address_in_range(GuestAddress(0x5000)), true);
        assert_eq!(gm.address_in_range(GuestAddress(0x6000)), false);
        assert_eq!(
            gm.range_overlap(GuestAddress(0x1000), GuestAddress(0x3000)),
            true
        );
        assert_eq!(
            gm.range_overlap(GuestAddress(0x3000), GuestAddress(0x4000)),
            false
        );
        assert_eq!(
            gm.range_overlap(GuestAddress(0x3000), GuestAddress(0x7000)),
            true
        );
        assert!(gm.checked_offset(GuestAddress(0x1000), 0x1000).is_none());
        assert!(gm.checked_offset(GuestAddress(0x5000), 0x800).is_some());
        assert!(gm.checked_offset(GuestAddress(0x5000), 0x1000).is_none());
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_load_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().load();
        let num2: u64 = gm
            .get_ref_at_addr(GuestAddress(0x1000 + 32))
            .unwrap()
            .load();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_store_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.get_ref_at_addr(GuestAddress(0x500)).unwrap().store(val1);
        gm.get_ref_at_addr(GuestAddress(0x1000 + 32))
            .unwrap()
            .store(val2);
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        mem.do_in_region(addr, |mapping, _, _| Ok(mapping.as_ptr() as *const u8))
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let mem = GuestMemory::new(&[(start_addr1, 0x1000), (start_addr2, 0x4000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn shm_offset() {
        if !kernel_has_memfd() {
            return;
        }

        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&[(start_region1, size_region1), (start_region2, size_region2)])
            .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        let _ = gm.with_regions::<_, ()>(|index, _, size, _, shm, shm_offset| {
            let mmap = MemoryMappingBuilder::new(size)
                .from_shared_memory(shm)
                .offset(shm_offset)
                .build()
                .unwrap();

            if index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }

            Ok(())
        });
    }
}