// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::fs::File;
use std::io;
use std::mem::size_of;
use std::ptr::copy_nonoverlapping;
use std::ptr::read_unaligned;
use std::ptr::read_volatile;
use std::ptr::write_unaligned;
use std::ptr::write_volatile;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;
use std::sync::OnceLock;

use remain::sorted;
use serde::Deserialize;
use serde::Serialize;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;

use crate::descriptor::AsRawDescriptor;
use crate::descriptor::SafeDescriptor;
use crate::platform::MemoryMapping as PlatformMmap;
use crate::SharedMemory;
use crate::VolatileMemory;
use crate::VolatileMemoryError;
use crate::VolatileMemoryResult;
use crate::VolatileSlice;

static CACHELINE_SIZE: OnceLock<usize> = OnceLock::new();

#[allow(unused_assignments)]
fn get_cacheline_size_once() -> usize {
    let mut assume_reason: &str = "unknown";
    cfg_if::cfg_if! {
        if #[cfg(all(any(target_os = "android", target_os = "linux"), not(target_env = "musl")))] {
            // TODO: Remove once available in libc bindings
            #[cfg(target_os = "android")]
            const _SC_LEVEL1_DCACHE_LINESIZE: i32 = 0x0094;
            #[cfg(target_os = "linux")]
            use libc::_SC_LEVEL1_DCACHE_LINESIZE;

            // SAFETY:
            // Safe because we check the return value for errors or unsupported requests
            let linesize = unsafe { libc::sysconf(_SC_LEVEL1_DCACHE_LINESIZE) };
            if linesize > 0 {
                return linesize as usize;
            } else {
                assume_reason = "sysconf cacheline size query failed";
            }
        } else {
            assume_reason = "cacheline size query not implemented for platform/arch";
        }
    }

    let assumed_size = 64;
    log::debug!(
        "assuming cacheline_size={}; reason: {}.",
        assumed_size,
        assume_reason
    );
    assumed_size
}

/// Returns the system's effective cacheline size (e.g. the granularity at which arch-specific
/// cacheline management, such as with the clflush instruction, is expected to occur).
#[inline(always)]
fn get_cacheline_size() -> usize {
    let size = *CACHELINE_SIZE.get_or_init(get_cacheline_size_once);
    assert!(size > 0);
    size
}

#[sorted]
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("`add_fd_mapping` is unsupported")]
    AddFdMappingIsUnsupported,
    #[error("requested memory out of range")]
    InvalidAddress,
    #[error("requested alignment is incompatible")]
    InvalidAlignment,
    #[error("invalid argument provided when creating mapping")]
    InvalidArgument,
    #[error("requested offset is out of range of off_t")]
    InvalidOffset,
    #[error("requested memory range spans past the end of the region: offset={0} count={1} region_size={2}")]
    InvalidRange(usize, usize, usize),
    #[error("operation is not implemented on platform/architecture: {0}")]
    NotImplemented(&'static str),
    #[error("requested memory is not page aligned")]
    NotPageAligned,
    #[error("failed to read from file to memory: {0}")]
    ReadToMemory(#[source] io::Error),
    #[error("`remove_mapping` is unsupported")]
    RemoveMappingIsUnsupported,
    #[error("system call failed while creating the mapping: {0}")]
    StdSyscallFailed(io::Error),
    #[error("mmap related system call failed: {0}")]
    SystemCallFailed(#[source] crate::Error),
    #[error("failed to write from memory to file: {0}")]
    WriteFromMemory(#[source] io::Error),
}

pub type Result<T> = std::result::Result<T, Error>;

/// Memory access type for anonymous shared memory mapping.
#[derive(Copy, Clone, Default, Eq, PartialEq, Serialize, Deserialize, Debug)]
pub struct Protection {
    pub(crate) read: bool,
    pub(crate) write: bool,
}

impl Protection {
    /// Returns Protection allowing read/write access.
    #[inline(always)]
    pub fn read_write() -> Protection {
        Protection {
            read: true,
            write: true,
        }
    }

    /// Returns Protection allowing read access.
    #[inline(always)]
    pub fn read() -> Protection {
        Protection {
            read: true,
            ..Default::default()
        }
    }

    /// Returns Protection allowing write access.
    #[inline(always)]
    pub fn write() -> Protection {
        Protection {
            write: true,
            ..Default::default()
        }
    }

    /// Returns a copy of `self` with read access enabled.
    #[inline(always)]
    pub fn set_read(self) -> Protection {
        Protection { read: true, ..self }
    }

    /// Returns a copy of `self` with write access enabled.
    #[inline(always)]
    pub fn set_write(self) -> Protection {
        Protection {
            write: true,
            ..self
        }
    }

    /// Returns true if all access allowed by |other| is also allowed by |self|.
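    ///
    /// Example (illustrative doctest in the style of the ones below; assumes `Protection` is
    /// re-exported from `base` like the other types used in this file's doctests):
    ///
    /// ```
    /// # use base::Protection;
    /// assert!(Protection::read_write().allows(&Protection::read()));
    /// assert!(!Protection::read().allows(&Protection::write()));
    /// ```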
    #[inline(always)]
    pub fn allows(&self, other: &Protection) -> bool {
        self.read >= other.read && self.write >= other.write
    }
}

/// See [MemoryMapping](crate::platform::MemoryMapping) for struct- and method-level
/// documentation.
#[derive(Debug)]
pub struct MemoryMapping {
    pub(crate) mapping: PlatformMmap,

    // File backed mappings on Windows need to keep the underlying file open while the mapping is
    // open.
    // This will be `None` on non-Windows platforms. The field is never read there, hence the
    // leading underscore in its name.
    //
    // TODO(b:230902713) There was a concern about relying on the kernel's refcounting to keep the
    // file object's locks (e.g. exclusive read/write) in place. We need to revisit/validate that
    // concern.
    pub(crate) _file_descriptor: Option<SafeDescriptor>,
}

#[inline(always)]
unsafe fn flush_one(_addr: *const u8) -> Result<()> {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "x86_64")] {
            // As per table 11-7 of the SDM, processors are not required to
            // snoop UC mappings, so flush the target to memory.
            // SAFETY: assumes that the caller has supplied a valid address.
            unsafe { core::arch::x86_64::_mm_clflush(_addr) };
            Ok(())
        } else if #[cfg(target_arch = "aarch64")] {
            // Data cache clean by VA to PoC.
            // SAFETY: assumes that the caller has supplied a valid address.
            unsafe { std::arch::asm!("DC CVAC, {x}", x = in(reg) _addr) };
            Ok(())
        } else if #[cfg(target_arch = "arm")] {
            Err(Error::NotImplemented("Userspace cannot flush to PoC"))
        } else {
            Err(Error::NotImplemented("Cache flush not implemented"))
        }
    }
}

impl MemoryMapping {
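    /// Writes the contents of `buf` into the region starting at `offset`, returning the number
    /// of bytes written. The write is truncated (rather than failing) if it would extend past
    /// the end of the region; only an `offset` past the end is an error.
    ///
    /// Example (illustrative, in the style of the doctests below):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let written = mem_map.write_slice(&[1u8, 2, 3, 4], 256).unwrap();
    /// assert_eq!(written, 4);
    /// ```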
    pub fn write_slice(&self, buf: &[u8], offset: usize) -> Result<usize> {
        match self.mapping.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(buf.as_ptr(), self.as_ptr().add(offset), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

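    /// Reads from the region starting at `offset` into `buf`, returning the number of bytes
    /// read. Like `write_slice`, the read is truncated if it would extend past the end of the
    /// region.
    ///
    /// Example (illustrative, in the style of the doctests below):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// mem_map.write_slice(&[0xaau8; 8], 0).unwrap();
    /// let mut buf = [0u8; 8];
    /// let read = mem_map.read_slice(&mut buf, 0).unwrap();
    /// assert_eq!(read, 8);
    /// assert_eq!(buf, [0xaau8; 8]);
    /// ```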
    pub fn read_slice(&self, buf: &mut [u8], offset: usize) -> Result<usize> {
        match self.size().checked_sub(offset) {
            Some(size_past_offset) => {
                let bytes_copied = min(size_past_offset, buf.len());
                // SAFETY:
                // The bytes_copied equation above ensures we don't copy bytes out of range of
                // either buf or this slice. We also know that the buffers do not overlap because
                // slices can never occupy the same memory as a volatile slice.
                unsafe {
                    copy_nonoverlapping(self.as_ptr().add(offset), buf.as_mut_ptr(), bytes_copied);
                }
                Ok(bytes_copied)
            }
            None => Err(Error::InvalidAddress),
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// This method is for writing to regular memory. If writing to a mapped
    /// I/O region, use [`MemoryMapping::write_obj_volatile`].
    ///
    /// # Examples
    /// * Write a u64 at offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj(55u64, 16);
    /// assert!(res.is_ok());
    /// ```
    pub fn write_obj<T: IntoBytes + Immutable>(&self, val: T, offset: usize) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_unaligned(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// This method is for reading from regular memory. If reading from a
    /// mapped I/O region, use [`MemoryMapping::read_obj_volatile`].
    ///
    /// # Examples
    /// * Read a u64 written to offset 32.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let res = mem_map.write_obj(55u64, 32);
    /// assert!(res.is_ok());
    /// let num: u64 = mem_map.read_obj(32).unwrap();
    /// assert_eq!(55, num);
    /// ```
    pub fn read_obj<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because we checked the bounds above and because `T: FromBytes` guarantees
        // that any bit pattern is a valid value of type `T`.
        unsafe {
            Ok(read_unaligned(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

    /// Writes an object to the memory region at the specified offset.
    /// Returns Ok(()) if the object fits, or Err if it extends past the end.
    ///
    /// The write operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When writing
    /// to regular memory, prefer [`MemoryMapping::write_obj`].
    ///
    /// # Examples
    /// * Write a u32 at offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj_volatile(0xf00u32, 16);
    /// assert!(res.is_ok());
    /// ```
    pub fn write_obj_volatile<T: IntoBytes + Immutable>(
        &self,
        val: T,
        offset: usize,
    ) -> Result<()> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // Make sure writes to memory have been committed before performing I/O that could
        // potentially depend on them.
        fence(Ordering::SeqCst);
        // SAFETY:
        // This is safe because we checked the bounds above.
        unsafe {
            write_volatile(self.as_ptr().add(offset) as *mut T, val);
        }
        Ok(())
    }

    /// Reads an object from the memory region at the given offset.
    /// Reading from a volatile area isn't strictly safe as it could change
    /// mid-read. However, as long as the type T is plain old data and can
    /// handle random initialization, everything will be OK.
    ///
    /// The read operation will be volatile, i.e. it will not be reordered by
    /// the compiler and is suitable for I/O, but must be aligned. When reading
    /// from regular memory, prefer [`MemoryMapping::read_obj`].
    ///
    /// # Examples
    /// * Read a u32 written to offset 16.
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::SharedMemory;
    /// # let shm = SharedMemory::new("test", 1024).unwrap();
    /// # let mut mem_map = MemoryMappingBuilder::new(1024).from_shared_memory(&shm).build().unwrap();
    /// let res = mem_map.write_obj(0xf00u32, 16);
    /// assert!(res.is_ok());
    /// let num: u32 = mem_map.read_obj_volatile(16).unwrap();
    /// assert_eq!(0xf00, num);
    /// ```
    pub fn read_obj_volatile<T: FromBytes>(&self, offset: usize) -> Result<T> {
        self.mapping.range_end(offset, size_of::<T>())?;
        // SAFETY:
        // This is safe because we checked the bounds above and because `T: FromBytes` guarantees
        // that any bit pattern is a valid value of type `T`.
        unsafe {
            Ok(read_volatile(
                self.as_ptr().add(offset) as *const u8 as *const T
            ))
        }
    }

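    /// Flushes modified pages of the mapped region to the backing file or device using the
    /// platform's msync equivalent.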
    pub fn msync(&self) -> Result<()> {
        self.mapping.msync()
    }

    /// Flush a region of the MemoryMapping from the system's caching hierarchy.
    /// There are several uses for flushing:
    ///
    /// * Cached memory which the guest may be reading through an uncached mapping:
    ///
    ///   Guest reads via an uncached mapping can bypass the cache and directly access main
    ///   memory. This is outside the memory model of Rust, which means that even with proper
    ///   synchronization, guest reads via an uncached mapping might not see updates from the
    ///   host. As such, it is necessary to perform architectural cache maintenance to flush the
    ///   host writes to main memory.
    ///
    ///   Note that this does not support writable uncached guest mappings, as doing so
    ///   requires invalidating the cache, not flushing the cache.
    ///
    /// * Uncached memory which the guest may be writing through a cached mapping:
    ///
    ///   Guest writes via a cached mapping of a host's uncached memory may never make it to
    ///   system/device memory prior to being read. In such cases, explicit flushing of the cached
    ///   writes is necessary, since other managers of the host's uncached mapping (e.g. DRM) see
    ///   no need to flush, as they believe all writes would explicitly bypass the caches.
    ///
    /// Currently only supported on x86_64 and aarch64. Cannot be supported on 32-bit arm.
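    ///
    /// Example (illustrative; `flush_region` may return `NotImplemented` on architectures other
    /// than the two named above, so the result is not unwrapped here):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # let mem_map = MemoryMappingBuilder::new(4096).build().unwrap();
    /// mem_map.write_obj(0xaa55aa55u32, 0).unwrap();
    /// // Flush the first cacheline's worth of the mapping to main memory.
    /// let _ = mem_map.flush_region(0, 64);
    /// ```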
    pub fn flush_region(&self, offset: usize, len: usize) -> Result<()> {
        let addr: *const u8 = self.as_ptr();
        let size = self.size();

        // disallow overflow/wrapping ranges and subregion extending beyond mapped range
        if usize::MAX - size < addr as usize || offset >= size || size - offset < len {
            return Err(Error::InvalidRange(offset, len, size));
        }

        // SAFETY:
        // Safe because already validated that `next` will be an address in the mapping:
        // * mapped region is non-wrapping
        // * subregion is bounded within the mapped region
        let mut next: *const u8 = unsafe { addr.add(offset) };

        let cacheline_size = get_cacheline_size();
        let cacheline_count = len.div_ceil(cacheline_size);

        for _ in 0..cacheline_count {
            // SAFETY:
            // Safe because `next` is guaranteed to be within the mapped region (see earlier
            // validations), and flushing the cache doesn't affect any rust safety properties.
            unsafe { flush_one(next)? };

            // SAFETY:
            // Safe because we never use next if it goes out of the mapped region or overflows its
            // storage type (based on earlier validations and the loop bounds).
            next = unsafe { next.add(cacheline_size) };
        }
        Ok(())
    }

    /// Flush all backing memory for a mapping in an arch-specific manner (see `flush_region()`).
    pub fn flush_all(&self) -> Result<()> {
        self.flush_region(0, self.size())
    }
}

pub struct MemoryMappingBuilder<'a> {
    pub(crate) descriptor: Option<&'a dyn AsRawDescriptor>,
    pub(crate) is_file_descriptor: bool,
    #[cfg_attr(target_os = "macos", allow(unused))]
    pub(crate) size: usize,
    pub(crate) offset: Option<u64>,
    pub(crate) align: Option<u64>,
    pub(crate) protection: Option<Protection>,
    #[cfg_attr(target_os = "macos", allow(unused))]
    #[cfg_attr(windows, allow(unused))]
    pub(crate) populate: bool,
}

/// Builds a MemoryMapping object from the specified arguments.
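///
/// Example (illustrative; mirrors the doctests above, and assumes `MappedRegion` is re-exported
/// from `base` so that `size()` is in scope):
///
/// ```
/// # use base::MappedRegion;
/// # use base::MemoryMappingBuilder;
/// let mmap = MemoryMappingBuilder::new(4096).build().unwrap();
/// assert_eq!(mmap.size(), 4096);
/// ```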
impl<'a> MemoryMappingBuilder<'a> {
    /// Creates a new builder specifying size of the memory region in bytes.
    pub fn new(size: usize) -> MemoryMappingBuilder<'a> {
        MemoryMappingBuilder {
            descriptor: None,
            size,
            is_file_descriptor: false,
            offset: None,
            align: None,
            protection: None,
            populate: false,
        }
    }

    /// Build the memory mapping backed by the specified `File`.
    ///
    /// Default: Create a new memory mapping.
    ///
    /// Note: this is a forward looking interface to accommodate platforms that
    /// require special handling for file backed mappings.
    #[allow(clippy::wrong_self_convention, unused_mut)]
    pub fn from_file(mut self, file: &'a File) -> MemoryMappingBuilder<'a> {
        // On Windows, files require special handling (next day shipping if possible).
        self.is_file_descriptor = true;

        self.descriptor = Some(file as &dyn AsRawDescriptor);
        self
    }

    /// Build the memory mapping backed by the specified `SharedMemory`.
    ///
    /// Default: Create a new memory mapping.
    pub fn from_shared_memory(mut self, shm: &'a SharedMemory) -> MemoryMappingBuilder<'a> {
        self.descriptor = Some(shm as &dyn AsRawDescriptor);
        self
    }

    /// Offset in bytes from the beginning of the mapping to start the mmap.
    ///
    /// Default: No offset
    pub fn offset(mut self, offset: u64) -> MemoryMappingBuilder<'a> {
        self.offset = Some(offset);
        self
    }

    /// Protection (e.g. readable/writable) of the memory region.
    ///
    /// Default: Read/write
    pub fn protection(mut self, protection: Protection) -> MemoryMappingBuilder<'a> {
        self.protection = Some(protection);
        self
    }

    /// Alignment of the memory region mapping in bytes.
    ///
    /// Default: No alignment
    pub fn align(mut self, alignment: u64) -> MemoryMappingBuilder<'a> {
        self.align = Some(alignment);
        self
    }
}

impl VolatileMemory for MemoryMapping {
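    /// Returns a `VolatileSlice` covering `count` bytes starting at `offset`, validating that
    /// the requested range lies within the mapping before forming the raw pointer.
    ///
    /// Example (illustrative; assumes `VolatileMemory` and `VolatileSlice::size` are available
    /// from `base` as used elsewhere in this crate):
    ///
    /// ```
    /// # use base::MemoryMappingBuilder;
    /// # use base::VolatileMemory;
    /// # let mem_map = MemoryMappingBuilder::new(1024).build().unwrap();
    /// let slice = mem_map.get_slice(128, 16).unwrap();
    /// assert_eq!(slice.size(), 16);
    /// ```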
    fn get_slice(&self, offset: usize, count: usize) -> VolatileMemoryResult<VolatileSlice> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;

        if mem_end > self.size() {
            return Err(VolatileMemoryError::OutOfBounds { addr: mem_end });
        }

        let new_addr =
            (self.as_ptr() as usize)
                .checked_add(offset)
                .ok_or(VolatileMemoryError::Overflow {
                    base: self.as_ptr() as usize,
                    offset,
                })?;

        // SAFETY:
        // Safe because we checked that offset + count was within our range and we only ever hand
        // out volatile accessors.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }
}

/// A range of memory that can be msynced, for abstracting over different types of memory mappings.
///
/// # Safety
/// Safe when implementers guarantee `ptr`..`ptr+size` is an mmaped region owned by this object that
/// can't be unmapped during the `MappedRegion`'s lifetime.
pub unsafe trait MappedRegion: Send + Sync {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8;

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the region with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        _offset: usize,
        _size: usize,
        _fd: &dyn AsRawDescriptor,
        _fd_offset: u64,
        _prot: Protection,
    ) -> Result<()> {
        Err(Error::AddFdMappingIsUnsupported)
    }

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, _offset: usize, _size: usize) -> Result<()> {
        Err(Error::RemoveMappingIsUnsupported)
    }
}

// SAFETY:
// Safe because it exclusively forwards calls to a safe implementation.
unsafe impl MappedRegion for MemoryMapping {
    fn as_ptr(&self) -> *mut u8 {
        self.mapping.as_ptr()
    }

    fn size(&self) -> usize {
        self.mapping.size()
    }
}

#[derive(Debug, PartialEq, Eq)]
pub struct ExternalMapping {
    pub ptr: u64,
    pub size: usize,
}

// SAFETY:
// `ptr`..`ptr+size` is an mmaped region and is owned by this object. Caller
// needs to ensure that the region is not unmapped during the `MappedRegion`'s
// lifetime.
unsafe impl MappedRegion for ExternalMapping {
    /// Returns a pointer to the beginning of the memory region. Should only be
    /// used for passing this region to ioctls for setting guest memory.
    fn as_ptr(&self) -> *mut u8 {
        self.ptr as *mut u8
    }

    /// Returns the size of the memory region in bytes.
    fn size(&self) -> usize {
        self.size
    }
}