// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Track memory regions that are mapped to the guest VM.

use std::ffi::CStr;
use std::fmt::{self, Display};
use std::os::unix::io::{AsRawFd, RawFd};
use std::result;
use std::sync::Arc;

use crate::guest_address::GuestAddress;
use crate::mmap::{self, MemoryMapping};
use crate::shm::{MemfdSeals, SharedMemory};
use crate::{errno, pagesize};
use data_model::volatile_memory::*;
use data_model::DataInit;

#[derive(Debug)]
pub enum Error {
    InvalidGuestAddress(GuestAddress),
    MemoryAccess(GuestAddress, mmap::Error),
    MemoryMappingFailed(mmap::Error),
    MemoryRegionOverlap,
    MemoryNotAligned,
    MemoryCreationFailed(errno::Error),
    MemorySetSizeFailed(errno::Error),
    MemoryAddSealsFailed(errno::Error),
    ShortWrite { expected: usize, completed: usize },
    ShortRead { expected: usize, completed: usize },
}
pub type Result<T> = result::Result<T, Error>;

impl std::error::Error for Error {}

impl Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::Error::*;

        match self {
            InvalidGuestAddress(addr) => write!(f, "invalid guest address {}", addr),
            MemoryAccess(addr, e) => {
                write!(f, "invalid guest memory access at addr={}: {}", addr, e)
            }
            MemoryMappingFailed(e) => write!(f, "failed to map guest memory: {}", e),
            MemoryRegionOverlap => write!(f, "memory regions overlap"),
            MemoryNotAligned => write!(f, "memfd regions must be page aligned"),
            MemoryCreationFailed(e) => write!(f, "failed to create memfd region: {}", e),
            MemorySetSizeFailed(e) => write!(f, "failed to set memfd region size: {}", e),
            MemoryAddSealsFailed(e) => write!(f, "failed to set seals on memfd region: {}", e),
            ShortWrite {
                expected,
                completed,
            } => write!(
                f,
                "incomplete write of {} instead of {} bytes",
                completed, expected,
            ),
            ShortRead {
                expected,
                completed,
            } => write!(
                f,
                "incomplete read of {} instead of {} bytes",
                completed, expected,
            ),
        }
    }
}

struct MemoryRegion {
    mapping: MemoryMapping,
    guest_base: GuestAddress,
    memfd_offset: usize,
}

fn region_end(region: &MemoryRegion) -> GuestAddress {
    // unchecked_add is safe as the region bounds were checked when it was created.
    region
        .guest_base
        .unchecked_add(region.mapping.size() as u64)
}

/// Tracks memory regions and where they are mapped in the guest, along with the
/// memfd that backs the underlying memory regions, if any.
#[derive(Clone)]
pub struct GuestMemory {
    regions: Arc<Vec<MemoryRegion>>,
    memfd: Option<Arc<SharedMemory>>,
}

impl AsRawFd for GuestMemory {
    fn as_raw_fd(&self) -> RawFd {
        match &self.memfd {
            Some(memfd) => memfd.as_raw_fd(),
            None => panic!("GuestMemory is not backed by a memfd"),
        }
    }
}

impl GuestMemory {
    /// Creates the backing memfd for `GuestMemory` regions; every range size must be
    /// page aligned.
    fn create_memfd(ranges: &[(GuestAddress, u64)]) -> Result<SharedMemory> {
        let mut aligned_size = 0;
        let pg_size = pagesize();
        for range in ranges {
            if range.1 % pg_size as u64 != 0 {
                return Err(Error::MemoryNotAligned);
            }

            aligned_size += range.1;
        }

        let mut seals = MemfdSeals::new();

        seals.set_shrink_seal();
        seals.set_grow_seal();
        seals.set_seal_seal();

        let mut memfd =
            SharedMemory::new(Some(CStr::from_bytes_with_nul(b"crosvm_guest\0").unwrap()))
                .map_err(Error::MemoryCreationFailed)?;
        memfd
            .set_size(aligned_size)
            .map_err(Error::MemorySetSizeFailed)?;
        memfd
            .add_seals(seals)
            .map_err(Error::MemoryAddSealsFailed)?;

        Ok(memfd)
    }

    /// Creates a container for guest memory regions.
    /// Valid memory regions are specified as a Vec of (Address, Size) tuples sorted by Address.
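    ///
    /// # Examples
    ///
    /// A minimal construction with one page-aligned region:
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).unwrap();
    /// ```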
    pub fn new(ranges: &[(GuestAddress, u64)]) -> Result<GuestMemory> {
        // Create memfd

        // TODO(prilik) remove optional memfd once parallel CQ lands (crbug.com/942183).
        // Many classic CQ builders run old kernels without memfd support, resulting in test
        // failures. It's less effort to introduce this temporary optional path than to
        // manually mark all affected tests as ignore.
        let memfd = match GuestMemory::create_memfd(ranges) {
            Err(Error::MemoryCreationFailed { .. }) => {
                warn!("GuestMemory is not backed by a memfd");
                None
            }
            Err(e) => return Err(e),
            Ok(memfd) => Some(memfd),
        };

        // Create memory regions
        let mut regions = Vec::<MemoryRegion>::new();
        let mut offset = 0;

        for range in ranges {
            if let Some(last) = regions.last() {
                if last
                    .guest_base
                    .checked_add(last.mapping.size() as u64)
                    .map_or(true, |a| a > range.0)
                {
                    return Err(Error::MemoryRegionOverlap);
                }
            }

            let mapping = match &memfd {
                Some(memfd) => MemoryMapping::from_fd_offset(memfd, range.1 as usize, offset),
                None => MemoryMapping::new(range.1 as usize),
            }
            .map_err(Error::MemoryMappingFailed)?;

            regions.push(MemoryRegion {
                mapping,
                guest_base: range.0,
                memfd_offset: offset,
            });

            offset += range.1 as usize;
        }
        Ok(GuestMemory {
            regions: Arc::new(regions),
            memfd: memfd.map(Arc::new),
        })
    }

    /// Returns the end address of memory.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_end_addr() -> Result<(), ()> {
    /// let start_addr = GuestAddress(0x1000);
    /// let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// assert_eq!(start_addr.checked_add(0x1000), Some(gm.end_addr()));
    /// Ok(())
    /// # }
    /// ```
    pub fn end_addr(&self) -> GuestAddress {
        self.regions
            .iter()
            .max_by_key(|region| region.guest_base)
            .map_or(GuestAddress(0), |region| region_end(region))
    }

    /// Returns the total size of memory in bytes.
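    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_memory_size() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![
    ///     (GuestAddress(0x0), 0x1000),
    ///     (GuestAddress(0x10000), 0x2000),
    /// ])
    /// .map_err(|_| ())?;
    /// assert_eq!(0x3000, gm.memory_size());
    /// Ok(())
    /// # }
    /// ```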
    pub fn memory_size(&self) -> u64 {
        self.regions
            .iter()
            .map(|region| region.mapping.size() as u64)
            .sum()
    }

    /// Returns true if the given address is within the memory range available to the guest.
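    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_address_in_range() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert!(gm.address_in_range(GuestAddress(0x1500)));
    /// assert!(!gm.address_in_range(GuestAddress(0x2000)));
    /// Ok(())
    /// # }
    /// ```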
    pub fn address_in_range(&self, addr: GuestAddress) -> bool {
        addr < self.end_addr()
    }

    /// Returns the address plus the offset if it is in range.
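    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_checked_offset() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// assert_eq!(
    ///     gm.checked_offset(GuestAddress(0x1000), 0x200),
    ///     Some(GuestAddress(0x1200))
    /// );
    /// // The end address itself is out of range.
    /// assert_eq!(gm.checked_offset(GuestAddress(0x1000), 0x1000), None);
    /// Ok(())
    /// # }
    /// ```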
    pub fn checked_offset(&self, addr: GuestAddress, offset: u64) -> Option<GuestAddress> {
        addr.checked_add(offset)
            .and_then(|a| if a < self.end_addr() { Some(a) } else { None })
    }

    /// Returns the number of memory regions.
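    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_num_regions() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![
    ///     (GuestAddress(0x0), 0x1000),
    ///     (GuestAddress(0x10000), 0x1000),
    /// ])
    /// .map_err(|_| ())?;
    /// assert_eq!(2, gm.num_regions());
    /// Ok(())
    /// # }
    /// ```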
    pub fn num_regions(&self) -> u64 {
        self.regions.len() as u64
    }

    /// Madvise away the address range in the host that is associated with the given guest range.
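    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_remove_range() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// // Tell the host it may reclaim the first half of the region.
    /// gm.remove_range(GuestAddress(0x1000), 0x800).map_err(|_| ())?;
    /// Ok(())
    /// # }
    /// ```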
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        self.do_in_region(addr, move |mapping, offset| {
            mapping
                .remove_range(offset, count as usize)
                .map_err(|e| Error::MemoryAccess(addr, e))
        })
    }

    /// Perform the specified action on each region's addresses.
    ///
    /// Callback is called with arguments:
    /// * index: usize
    /// * guest_addr: GuestAddress
    /// * size: usize
    /// * host_addr: usize
    /// * memfd_offset: usize
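    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_with_regions() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// gm.with_regions::<_, ()>(|index, guest_addr, size, host_addr, memfd_offset| {
    ///     println!(
    ///         "region {} of {:#x} bytes at guest {} (host {:#x}, memfd offset {:#x})",
    ///         index, size, guest_addr, host_addr, memfd_offset
    ///     );
    ///     Ok(())
    /// })
    /// # }
    /// ```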
    pub fn with_regions<F, E>(&self, mut cb: F) -> result::Result<(), E>
    where
        F: FnMut(usize, GuestAddress, usize, usize, usize) -> result::Result<(), E>,
    {
        for (index, region) in self.regions.iter().enumerate() {
            cb(
                index,
                region.guest_base,
                region.mapping.size(),
                region.mapping.as_ptr() as usize,
                region.memfd_offset,
            )?;
        }
        Ok(())
    }

    /// Writes a slice to guest memory at the specified guest address.
    /// Returns the number of bytes written. The number of bytes written can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Write a slice at guest address 0x1200.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_write_slice() -> Result<(), ()> {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// let res = gm.write_at_addr(&[1, 2, 3, 4, 5], GuestAddress(0x1200)).map_err(|_| ())?;
    /// assert_eq!(5, res);
    /// Ok(())
    /// # }
    /// ```
    pub fn write_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes the entire contents of a slice to guest memory at the specified
    /// guest address.
    ///
    /// Returns an error if there isn't enough room in the memory region to
    /// complete the entire write. Part of the data may have been written
    /// nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use sys_util::{guest_memory, GuestAddress, GuestMemory};
    ///
    /// fn test_write_all() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x1000)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     gm.write_all_at_addr(b"zyxwvut", GuestAddress(0x1200))
    /// }
    /// ```
    pub fn write_all_at_addr(&self, buf: &[u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.write_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortWrite {
                expected,
                completed,
            })
        }
    }

    /// Reads into a slice from guest memory at the specified guest address.
    /// Returns the number of bytes read. The number of bytes read can
    /// be less than the length of the slice if there isn't enough room in the
    /// memory region.
    ///
    /// # Examples
    /// * Read a slice of length 16 at guest address 0x1200.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_read_slice() -> Result<(), ()> {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// let buf = &mut [0u8; 16];
    /// let res = gm.read_at_addr(buf, GuestAddress(0x1200)).map_err(|_| ())?;
    /// assert_eq!(16, res);
    /// Ok(())
    /// # }
    /// ```
    pub fn read_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<usize> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .read_slice(buf, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Reads from guest memory at the specified address to fill the entire
    /// buffer.
    ///
    /// Returns an error if there isn't enough room in the memory region to fill
    /// the entire buffer. Part of the buffer may have been filled nevertheless.
    ///
    /// # Examples
    ///
    /// ```
    /// use sys_util::{guest_memory, GuestAddress, GuestMemory, MemoryMapping};
    ///
    /// fn test_read_exact() -> guest_memory::Result<()> {
    ///     let ranges = &[(GuestAddress(0x1000), 0x1000)];
    ///     let gm = GuestMemory::new(ranges)?;
    ///     let mut buffer = [0u8; 0x200];
    ///     gm.read_exact_at_addr(&mut buffer, GuestAddress(0x1200))
    /// }
    /// ```
    pub fn read_exact_at_addr(&self, buf: &mut [u8], guest_addr: GuestAddress) -> Result<()> {
        let expected = buf.len();
        let completed = self.read_at_addr(buf, guest_addr)?;
        if expected == completed {
            Ok(())
        } else {
            Err(Error::ShortRead {
                expected,
                completed,
            })
        }
    }

    /// Reads an object from guest memory at the given guest address.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// # Examples
    /// * Read a u64 from two areas of guest memory backed by separate mappings.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_read_u64() -> Result<u64, ()> {
    /// # let start_addr1 = GuestAddress(0x0);
    /// # let start_addr2 = GuestAddress(0x1000);
    /// # let mut gm = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)])
    /// #     .map_err(|_| ())?;
    /// let num1: u64 = gm.read_obj_from_addr(GuestAddress(32)).map_err(|_| ())?;
    /// let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).map_err(|_| ())?;
    /// # Ok(num1 + num2)
    /// # }
    /// ```
    pub fn read_obj_from_addr<T: DataInit>(&self, guest_addr: GuestAddress) -> Result<T> {
        self.do_in_region(guest_addr, |mapping, offset| {
            mapping
                .read_obj(offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes an object to the memory region at the specified guest address.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// # Examples
    /// * Write a u64 at guest address 0x1100.
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # fn test_write_u64() -> Result<(), ()> {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// gm.write_obj_at_addr(55u64, GuestAddress(0x1100))
    ///     .map_err(|_| ())
    /// # }
    /// ```
    pub fn write_obj_at_addr<T: DataInit>(&self, val: T, guest_addr: GuestAddress) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_obj(val, offset)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Reads data from a file descriptor and writes it to guest memory.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin writing memory at this address.
    /// * `src` - Read from `src` to memory.
    /// * `count` - Read `count` bytes from `src` to memory.
    ///
    /// # Examples
    ///
    /// * Read bytes from /dev/urandom
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_read_random() -> Result<u32, ()> {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// let mut file = File::open(Path::new("/dev/urandom")).map_err(|_| ())?;
    /// let addr = GuestAddress(0x1010);
    /// gm.read_to_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// let read_addr = addr.checked_add(8).ok_or(())?;
    /// let rand_val: u32 = gm.read_obj_from_addr(read_addr).map_err(|_| ())?;
    /// # Ok(rand_val)
    /// # }
    /// ```
    pub fn read_to_memory(
        &self,
        guest_addr: GuestAddress,
        src: &AsRawFd,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .read_to_memory(offset, src, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Writes data from memory to a file descriptor.
    ///
    /// # Arguments
    /// * `guest_addr` - Begin reading memory from this address.
    /// * `dst` - Write from memory to `dst`.
    /// * `count` - Write `count` bytes from memory to `dst`.
    ///
    /// # Examples
    ///
    /// * Write 128 bytes to /dev/null
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory, MemoryMapping};
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # fn test_write_null() -> Result<(), ()> {
    /// # let start_addr = GuestAddress(0x1000);
    /// # let gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// let mut file = File::create(Path::new("/dev/null")).map_err(|_| ())?;
    /// let addr = GuestAddress(0x1010);
    /// gm.write_from_memory(addr, &mut file, 128).map_err(|_| ())?;
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_from_memory(
        &self,
        guest_addr: GuestAddress,
        dst: &AsRawFd,
        count: usize,
    ) -> Result<()> {
        self.do_in_region(guest_addr, move |mapping, offset| {
            mapping
                .write_from_memory(offset, dst, count)
                .map_err(|e| Error::MemoryAccess(guest_addr, e))
        })
    }

    /// Convert a GuestAddress into a pointer in the address space of this
    /// process. This should only be necessary for giving addresses to the
    /// kernel, as with vhost ioctls. Normal reads/writes to guest memory should
    /// be done through `write_from_memory`, `read_obj_from_addr`, etc.
    ///
    /// # Arguments
    /// * `guest_addr` - Guest address to convert.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_host_addr() -> Result<(), ()> {
    /// let start_addr = GuestAddress(0x1000);
    /// let mut gm = GuestMemory::new(&vec![(start_addr, 0x1000)]).map_err(|_| ())?;
    /// let addr = gm.get_host_address(GuestAddress(0x1200)).unwrap();
    /// println!("Host address is {:p}", addr);
    /// Ok(())
    /// # }
    /// ```
    pub fn get_host_address(&self, guest_addr: GuestAddress) -> Result<*const u8> {
        self.do_in_region(guest_addr, |mapping, offset| {
            // This is safe; `do_in_region` already checks that offset is in
            // bounds.
            Ok(unsafe { mapping.as_ptr().add(offset) } as *const u8)
        })
    }

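    /// Calls `cb` with the `MemoryMapping` that contains `guest_addr` and the
    /// offset of `guest_addr` within that mapping, or returns
    /// `Error::InvalidGuestAddress` if no region contains the address.
    ///
    /// # Examples
    ///
    /// ```
    /// # use sys_util::{GuestAddress, GuestMemory};
    /// # fn test_do_in_region() -> Result<(), ()> {
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0x1000), 0x1000)]).map_err(|_| ())?;
    /// let region_size = gm
    ///     .do_in_region(GuestAddress(0x1010), |mapping, offset| {
    ///         assert_eq!(offset, 0x10);
    ///         Ok(mapping.size())
    ///     })
    ///     .map_err(|_| ())?;
    /// assert_eq!(region_size, 0x1000);
    /// Ok(())
    /// # }
    /// ```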
    pub fn do_in_region<F, T>(&self, guest_addr: GuestAddress, cb: F) -> Result<T>
    where
        F: FnOnce(&MemoryMapping, usize) -> Result<T>,
    {
        for region in self.regions.iter() {
            if guest_addr >= region.guest_base && guest_addr < region_end(region) {
                return cb(
                    &region.mapping,
                    guest_addr.offset_from(region.guest_base) as usize,
                );
            }
        }
        Err(Error::InvalidGuestAddress(guest_addr))
    }
}

impl VolatileMemory for GuestMemory {
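    // Note: `offset` here is an absolute guest address, mirroring the region
    // lookup in `do_in_region`, and is translated per region below.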
    fn get_slice(&self, offset: u64, count: u64) -> VolatileMemoryResult<VolatileSlice> {
        for region in self.regions.iter() {
            if offset >= region.guest_base.0 && offset < region_end(region).0 {
                return region
                    .mapping
                    .get_slice(offset - region.guest_base.0, count);
            }
        }
        Err(VolatileMemoryError::OutOfBounds { addr: offset })
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::kernel_has_memfd;

    #[test]
    fn test_alignment() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);

        assert!(GuestMemory::new(&vec![(start_addr1, 0x100), (start_addr2, 0x400)]).is_err());
        assert!(GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).is_ok());
    }

    #[test]
    fn two_regions() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x4000);
        assert!(GuestMemory::new(&vec![(start_addr1, 0x4000), (start_addr2, 0x4000)]).is_ok());
    }

    #[test]
    fn overlap_memory() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        assert!(GuestMemory::new(&vec![(start_addr1, 0x2000), (start_addr2, 0x2000)]).is_err());
    }

    #[test]
    fn test_read_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_load_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.write_obj_at_addr(val1, GuestAddress(0x500)).unwrap();
        gm.write_obj_at_addr(val2, GuestAddress(0x1000 + 32))
            .unwrap();
        let num1: u64 = gm.get_ref(0x500).unwrap().load();
        let num2: u64 = gm.get_ref(0x1000 + 32).unwrap().load();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_ref_store_u64() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let gm = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x1000)]).unwrap();

        let val1: u64 = 0xaa55aa55aa55aa55;
        let val2: u64 = 0x55aa55aa55aa55aa;
        gm.get_ref(0x500).unwrap().store(val1);
        gm.get_ref(0x1000 + 32).unwrap().store(val2);
        let num1: u64 = gm.read_obj_from_addr(GuestAddress(0x500)).unwrap();
        let num2: u64 = gm.read_obj_from_addr(GuestAddress(0x1000 + 32)).unwrap();
        assert_eq!(val1, num1);
        assert_eq!(val2, num2);
    }

    #[test]
    fn test_memory_size() {
        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&vec![
            (start_region1, size_region1),
            (start_region2, size_region2),
        ])
        .unwrap();

        let mem_size = gm.memory_size();
        assert_eq!(mem_size, size_region1 + size_region2);
    }

    // Get the base address of the mapping for a GuestAddress.
    fn get_mapping(mem: &GuestMemory, addr: GuestAddress) -> Result<*const u8> {
        mem.do_in_region(addr, |mapping, _| Ok(mapping.as_ptr() as *const u8))
    }

    #[test]
    fn guest_to_host() {
        let start_addr1 = GuestAddress(0x0);
        let start_addr2 = GuestAddress(0x1000);
        let mem = GuestMemory::new(&vec![(start_addr1, 0x1000), (start_addr2, 0x4000)]).unwrap();

        // Verify the host addresses match what we expect from the mappings.
        let addr1_base = get_mapping(&mem, start_addr1).unwrap();
        let addr2_base = get_mapping(&mem, start_addr2).unwrap();
        let host_addr1 = mem.get_host_address(start_addr1).unwrap();
        let host_addr2 = mem.get_host_address(start_addr2).unwrap();
        assert_eq!(host_addr1, addr1_base);
        assert_eq!(host_addr2, addr2_base);

        // Check that a bad address returns an error.
        let bad_addr = GuestAddress(0x123456);
        assert!(mem.get_host_address(bad_addr).is_err());
    }

    #[test]
    fn memfd_offset() {
        if !kernel_has_memfd() {
            return;
        }

        let start_region1 = GuestAddress(0x0);
        let size_region1 = 0x1000;
        let start_region2 = GuestAddress(0x10000);
        let size_region2 = 0x2000;
        let gm = GuestMemory::new(&vec![
            (start_region1, size_region1),
            (start_region2, size_region2),
        ])
        .unwrap();

        gm.write_obj_at_addr(0x1337u16, GuestAddress(0x0)).unwrap();
        gm.write_obj_at_addr(0x0420u16, GuestAddress(0x10000))
            .unwrap();

        let _ = gm.with_regions::<_, ()>(|index, _, size, _, memfd_offset| {
            let mmap = MemoryMapping::from_fd_offset(&gm, size, memfd_offset).unwrap();

            if index == 0 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x1337u16);
            }

            if index == 1 {
                assert!(mmap.read_obj::<u16>(0x0).unwrap() == 0x0420u16);
            }

            Ok(())
        });
    }
}