// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use base::linux::FileDataIterator;
use base::linux::MemfdSeals;
use base::linux::MemoryMappingUnix;
use base::linux::SharedMemoryLinux;
use base::MappedRegion;
use base::SharedMemory;
use bitflags::bitflags;

use crate::Error;
use crate::GuestAddress;
use crate::GuestMemory;
use crate::MemoryRegion;
use crate::Result;

bitflags! {
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct MemoryPolicy: u32 {
        const USE_HUGEPAGES = 1;
        const LOCK_GUEST_MEMORY = (1 << 1);
    }
}

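/// Seals the memfd backing `shm` so the guest memory allocation can no longer grow,
/// shrink, or have its seals changed.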
pub(crate) fn finalize_shm(shm: &mut SharedMemory) -> Result<()> {
    // Seals are only a concept on Unix systems, so we must add them in conditional
    // compilation. On Windows, SharedMemory allocation cannot be updated after creation
    // regardless, so the same operation is done implicitly.
    let mut seals = MemfdSeals::new();

    seals.set_shrink_seal();
    seals.set_grow_seal();
    seals.set_seal_seal();

    shm.add_seals(seals).map_err(Error::MemoryAddSealsFailed)
}

impl GuestMemory {
    /// Madvise away the address range in the host that is associated with the given guest range.
    ///
    /// This feature is only available on Unix, where a MemoryMapping can remove a mapped range.
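    ///
    /// An illustrative usage sketch (assumes `guest_mem` is an existing `GuestMemory` and the
    /// given guest range falls inside a mapped region):
    ///
    /// ```ignore
    /// // Madvise away the 4 KiB of host memory backing guest physical address 0x1000.
    /// guest_mem.remove_range(GuestAddress(0x1000), 4096)?;
    /// ```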
    pub fn remove_range(&self, addr: GuestAddress, count: u64) -> Result<()> {
        let (mapping, offset, _) = self.find_region(addr)?;
        mapping
            .remove_range(offset, count as usize)
            .map_err(|e| Error::MemoryAccess(addr, e))
    }

    /// Applies the given memory policy (hugepage advice, memory locking) to every mapped region.
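    ///
    /// An illustrative sketch (assumes `guest_mem` is an existing `GuestMemory`):
    ///
    /// ```ignore
    /// // Advise hugepages for, and lock, all guest memory regions.
    /// let policy = MemoryPolicy::USE_HUGEPAGES | MemoryPolicy::LOCK_GUEST_MEMORY;
    /// guest_mem.set_memory_policy(policy);
    /// ```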
    pub fn set_memory_policy(&self, mem_policy: MemoryPolicy) {
        if mem_policy.is_empty() {
            return;
        }

        for region in self.regions.iter() {
            if mem_policy.contains(MemoryPolicy::USE_HUGEPAGES) {
                let ret = region.mapping.use_hugepages();

                if let Err(err) = ret {
                    println!("Failed to enable HUGEPAGE for mapping: {}", err);
                }
            }

            if mem_policy.contains(MemoryPolicy::LOCK_GUEST_MEMORY) {
                // This is done in coordination with remove_range() calls, which are
                // performed by the virtio-balloon process (they must be performed by
                // a different process from the one that issues the locks).
                // We also prevent this from happening in single-process configurations,
                // when we compute configuration flags.
                let ret = region.mapping.lock_all();

                if let Err(err) = ret {
                    println!("Failed to lock memory for mapping: {}", err);
                }
            }
        }
    }

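    /// Marks all guest memory mappings (via madvise `DONTFORK`) so they are not inherited by
    /// child processes created with fork().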
    pub fn use_dontfork(&self) -> anyhow::Result<()> {
        for region in self.regions.iter() {
            region.mapping.use_dontfork()?;
        }
        Ok(())
    }
}

impl MemoryRegion {
    /// Finds ranges of memory that might have non-zero data (i.e. not unallocated memory). The
    /// ranges are offsets into the region's mmap, not offsets into the backing file.
    ///
    /// For example, if there were three bytes and the second byte was a hole, the return would be
    /// `[0..1, 2..3]` (in practice these are probably always at least page sized).
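    ///
    /// Illustrative crate-internal sketch, pairing this with `zero_range` below:
    ///
    /// ```ignore
    /// // Zero out every populated range of a region.
    /// for range in region.find_data_ranges()? {
    ///     region.zero_range(range.start, range.end - range.start)?;
    /// }
    /// ```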
    pub(crate) fn find_data_ranges(&self) -> anyhow::Result<Vec<std::ops::Range<usize>>> {
        FileDataIterator::new(
            &self.shared_obj,
            self.obj_offset,
            u64::try_from(self.mapping.size()).unwrap(),
        )
        .map(|range| {
            let range = range?;
            // Convert from file offsets to mmap offsets.
            Ok(usize::try_from(range.start - self.obj_offset).unwrap()
                ..usize::try_from(range.end - self.obj_offset).unwrap())
        })
        .collect()
    }

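    /// Zeroes `size` bytes of the region starting at `offset` by removing the backing pages
    /// from the mapping.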
    pub(crate) fn zero_range(&self, offset: usize, size: usize) -> anyhow::Result<()> {
        self.mapping.remove_range(offset, size)?;
        Ok(())
    }
}