// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![deny(missing_docs)]

use std::ops::Range;
use std::ptr::copy_nonoverlapping;

use base::error;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MemoryMappingUnix;
use base::MmapError;
use base::SharedMemory;
use data_model::VolatileMemory;
use data_model::VolatileMemoryError;
use data_model::VolatileSlice;
use thiserror::Error as ThisError;

use crate::pagesize::pages_to_bytes;
use crate::present_list::PresentList;

/// Result type for staging memory operations.
pub type Result<T> = std::result::Result<T, Error>;

/// Errors returned by staging memory operations.
#[derive(ThisError, Debug)]
pub enum Error {
    /// Failed to mmap the staging memory.
    #[error("mmap operation failed: {0}")]
    Mmap(MmapError),
    /// Failed to access the staging memory as volatile memory.
    #[error("volatile memory operation failed: {0}")]
    VolatileMemory(VolatileMemoryError),
    /// The page index is out of the region.
    #[error("index is out of range")]
    OutOfRange,
}

impl From<MmapError> for Error {
    fn from(e: MmapError) -> Self {
        Self::Mmap(e)
    }
}

impl From<VolatileMemoryError> for Error {
    fn from(e: VolatileMemoryError) -> Self {
        Self::VolatileMemory(e)
    }
}

/// Copy operation from the guest memory to the staging memory.
pub struct CopyOp {
    src_addr: *const u8,
    dst_addr: *mut u8,
    size: usize,
}

/// CopyOp is safe to be sent to other threads because:
///   * The source memory region (guest memory) is alive for the monitor process lifetime.
///   * The destination memory region (staging memory) is alive until all the [CopyOp] are executed.
///   * [CopyOp] accesses both src/dst memory regions exclusively.
unsafe impl Send for CopyOp {}

impl CopyOp {
    /// Copies the specified pages of the guest memory to the staging memory.
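    ///
    /// # Example
    ///
    /// A hedged sketch of the intended flow: the op is created by [StagingMemory::copy] and,
    /// since [CopyOp] is [Send], may be executed later on another thread (`ignore`d because it
    /// needs the crosvm crates and a valid guest memory pointer):
    ///
    /// ```ignore
    /// // `staging_memory` is a StagingMemory; `src_addr` points at a page-aligned
    /// // page inside the guest memory.
    /// let op = unsafe { staging_memory.copy(src_addr, 0, 1) }.unwrap();
    /// std::thread::spawn(move || op.execute()).join().unwrap();
    /// ```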
    pub fn execute(self) {
        // Safe because:
        // * the source memory is in guest memory and no processes access it.
        // * src_addr and dst_addr are aligned with the page size.
        // * src and dst do not overlap since src_addr is from the guest memory and dst_addr
        //   is from the staging memory.
        unsafe {
            copy_nonoverlapping(self.src_addr, self.dst_addr, self.size);
        }
    }
}

/// [StagingMemory] stores active pages from the guest memory in a memfd-backed staging area.
///
/// [StagingMemory] is created per memory region.
///
/// On `crosvm swap enable` command, the monitor process moves all the active pages in the guest
/// memory to this staging memory. [StagingMemory] has several advantages over writing all
/// pages from the guest memory to the swap file directly.
///
/// * Less non-responsive time
///   * While moving the guest memory pages, the monitor process has to freeze all the crosvm
///     processes to guarantee no updates to the guest memory. Moving memory is faster than
///     writing it to disk.
/// * Hot pages bypass the disk
///   * The pages faulted between `crosvm swap enable` and `crosvm swap out` are swapped in from
///     this [StagingMemory] directly without being written into the swap file. This saves disk
///     resources and the latency of page fault handling.
///
/// NB: Staging memory is a memfd instead of private anonymous memory to match GuestMemory. This is
/// done to make accounting easier when calculating total guest memory consumption.
pub struct StagingMemory {
    mmap: MemoryMapping,
    // Tracks which pages are present, indexed by page index within the memory region.
    present_list: PresentList,
}

impl StagingMemory {
    /// Creates [StagingMemory].
    ///
    /// # Arguments
    ///
    /// * `shmem` - [SharedMemory] to mmap from.
    /// * `offset_bytes` - The offset in bytes from the head of the `shmem`.
    /// * `num_of_pages` - The number of pages in the region.
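    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the unit tests below (marked `ignore` since it needs the
    /// crosvm `base` crate at doctest time):
    ///
    /// ```ignore
    /// use base::pagesize;
    /// use base::SharedMemory;
    ///
    /// // Back a 200-page staging area with a fresh shared memory region.
    /// let shmem = SharedMemory::new("staging memory", 200 * pagesize() as u64).unwrap();
    /// let staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();
    /// assert_eq!(staging_memory.present_pages(), 0);
    /// ```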
    pub fn new(shmem: &SharedMemory, offset_bytes: u64, num_of_pages: usize) -> Result<Self> {
        let mmap = MemoryMappingBuilder::new(pages_to_bytes(num_of_pages))
            .from_shared_memory(shmem)
            .offset(offset_bytes)
            .build()?;
        Ok(Self {
            mmap,
            present_list: PresentList::new(num_of_pages),
        })
    }

    /// Copies the guest memory pages into the staging memory.
    ///
    /// # Arguments
    ///
    /// * `src_addr` - the head address of the pages on the guest memory.
    /// * `idx` - the index of the head of the pages.
    /// * `pages` - the number of pages to copy.
    ///
    /// # Safety
    ///
    /// * `src_addr` must be aligned with the page size.
    /// * The `pages` pages starting at `src_addr` must be within the guest memory.
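    ///
    /// # Example
    ///
    /// A hedged sketch of the intended flow, with an ordinary [MemoryMapping] standing in for
    /// guest memory as the unit tests below do (`ignore`d for the same reason as above):
    ///
    /// ```ignore
    /// // mmap.as_ptr() is page-aligned and covers at least one page, so the
    /// // safety requirements of copy() hold.
    /// let op = unsafe { staging_memory.copy(mmap.as_ptr(), 0, 1) }.unwrap();
    /// // The returned CopyOp performs the actual memory copy.
    /// op.execute();
    /// ```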
    #[deny(unsafe_op_in_unsafe_fn)]
    pub unsafe fn copy(&mut self, src_addr: *const u8, idx: usize, pages: usize) -> Result<CopyOp> {
        let idx_range = idx..idx + pages;
        let dst_slice = self.get_slice(idx_range.clone())?;

        let copy_op = CopyOp {
            src_addr,
            dst_addr: dst_slice.as_mut_ptr(),
            size: dst_slice.size(),
        };
        if !self.present_list.mark_as_present(idx_range) {
            unreachable!("idx_range is already validated by get_slice().");
        }
        Ok(copy_op)
    }

    /// Returns the content of the page corresponding to the index.
    ///
    /// Returns [Option::None] if the page has no content in the staging memory.
    ///
    /// Returns [Error::OutOfRange] if the `idx` is out of range.
    ///
    /// # Arguments
    ///
    /// * `idx` - the index of the page from the head of the pages.
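    ///
    /// # Example
    ///
    /// A short sketch along the lines of the unit tests below (`ignore`d):
    ///
    /// ```ignore
    /// assert!(staging_memory.page_content(0).unwrap().is_none());
    /// unsafe { staging_memory.copy(mmap.as_ptr(), 0, 1) }.unwrap().execute();
    /// assert!(staging_memory.page_content(0).unwrap().is_some());
    /// ```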
    pub fn page_content(&self, idx: usize) -> Result<Option<VolatileSlice>> {
        match self.present_list.get(idx) {
            Some(is_present) => {
                if *is_present {
                    Ok(Some(self.get_slice(idx..idx + 1)?))
                } else {
                    Ok(None)
                }
            }
            None => Err(Error::OutOfRange),
        }
    }

    /// Clears the pages in the staging memory corresponding to the indices.
    ///
    /// # Arguments
    ///
    /// * `idx_range` - the indices of consecutive pages to be cleared.
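    ///
    /// # Example
    ///
    /// A sketch following the `clear_range` unit test below (`ignore`d):
    ///
    /// ```ignore
    /// // With pages 0..5 present, drop pages 1 and 2; their backing memory is
    /// // released via remove_range().
    /// staging_memory.clear_range(1..3).unwrap();
    /// assert!(staging_memory.page_content(1).unwrap().is_none());
    /// assert!(staging_memory.page_content(3).unwrap().is_some());
    /// ```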
    pub fn clear_range(&mut self, idx_range: Range<usize>) -> Result<()> {
        if !self.present_list.clear_range(idx_range.clone()) {
            return Err(Error::OutOfRange);
        }
        self.mmap.remove_range(
            pages_to_bytes(idx_range.start),
            pages_to_bytes(idx_range.end - idx_range.start),
        )?;
        Ok(())
    }

    /// Returns the first range of indices of consecutive pages present in the staging memory.
    ///
    /// # Arguments
    ///
    /// * `max_pages` - the maximum length of the returned range, even if the run of consecutive
    ///   present pages is longer than this.
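    ///
    /// # Example
    ///
    /// A sketch following the `first_data_range` unit test below (`ignore`d):
    ///
    /// ```ignore
    /// // Pages 1..4 are present; `max_pages` truncates the returned range.
    /// assert_eq!(staging_memory.first_data_range(200).unwrap(), 1..4);
    /// assert_eq!(staging_memory.first_data_range(2).unwrap(), 1..3);
    /// ```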
    pub fn first_data_range(&mut self, max_pages: usize) -> Option<Range<usize>> {
        self.present_list.first_data_range(max_pages)
    }

    /// Returns the [VolatileSlice] corresponding to the indices.
    ///
    /// If the range is out of the region, this returns [Error::OutOfRange].
    ///
    /// # Arguments
    ///
    /// * `idx_range` - the indices of the pages.
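    ///
    /// # Example
    ///
    /// A sketch following the `get_slice` unit test below (`ignore`d):
    ///
    /// ```ignore
    /// use base::pagesize;
    ///
    /// // A slice covering pages 1 and 2 of the staging memory.
    /// let slice = staging_memory.get_slice(1..3).unwrap();
    /// assert_eq!(slice.size(), 2 * pagesize());
    /// ```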
    pub fn get_slice(&self, idx_range: Range<usize>) -> Result<VolatileSlice> {
        match self.mmap.get_slice(
            pages_to_bytes(idx_range.start),
            pages_to_bytes(idx_range.end - idx_range.start),
        ) {
            Ok(slice) => Ok(slice),
            Err(VolatileMemoryError::OutOfBounds { .. }) => Err(Error::OutOfRange),
            Err(e) => Err(e.into()),
        }
    }

    /// Returns the count of present pages in the staging memory.
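    ///
    /// # Example
    ///
    /// A sketch following the `present_pages` unit test below (`ignore`d):
    ///
    /// ```ignore
    /// // 4 pages copied at index 1 plus 1 page at index 12.
    /// assert_eq!(staging_memory.present_pages(), 5);
    /// ```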
    pub fn present_pages(&self) -> usize {
        self.present_list.all_present_pages()
    }
}

#[cfg(test)]
mod tests {
    use base::pagesize;
    use base::MappedRegion;

    use super::*;

    #[test]
    fn new_success() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        assert!(StagingMemory::new(&shmem, 0, 200).is_ok());
    }

    fn create_mmap(value: u8, pages: usize) -> MemoryMapping {
        let size = pages_to_bytes(pages);
        let mmap = MemoryMappingBuilder::new(size).build().unwrap();
        for i in 0..size {
            mmap.write_obj(value, i).unwrap();
        }
        mmap
    }

    #[test]
    fn copy_marks_as_present() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap = create_mmap(1, 4);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        let src_addr = mmap.as_ptr();
        unsafe {
            staging_memory.copy(src_addr, 1, 4).unwrap();
            // empty (zero-page) copy
            staging_memory.copy(src_addr, 10, 0).unwrap();
            // a single page
            staging_memory.copy(src_addr, 12, 1).unwrap();
        }

        assert!(staging_memory.page_content(0).unwrap().is_none());
        for i in 1..5 {
            assert!(staging_memory.page_content(i).unwrap().is_some());
        }
        for i in 5..12 {
            assert!(staging_memory.page_content(i).unwrap().is_none());
        }
        assert!(staging_memory.page_content(12).unwrap().is_some());
        for i in 13..200 {
            assert!(staging_memory.page_content(i).unwrap().is_none());
        }
    }

    #[test]
    fn page_content_default_is_none() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        assert!(staging_memory.page_content(0).unwrap().is_none());
    }

    #[test]
    fn page_content_returns_content() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap = create_mmap(1, 1);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        unsafe {
            staging_memory.copy(mmap.as_ptr(), 0, 1).unwrap().execute();
        }

        let page = staging_memory.page_content(0).unwrap().unwrap();
        let result = unsafe { std::slice::from_raw_parts(page.as_ptr() as *const u8, page.size()) };
        assert_eq!(result, &vec![1; pagesize()]);
    }

    #[test]
    fn page_content_out_of_range() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        assert!(staging_memory.page_content(199).is_ok());
        match staging_memory.page_content(200) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        }
    }

    #[test]
    fn clear_range() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap = create_mmap(1, 5);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        unsafe {
            staging_memory.copy(mmap.as_ptr(), 0, 5).unwrap();
        }
        staging_memory.clear_range(1..3).unwrap();

        assert!(staging_memory.page_content(0).unwrap().is_some());
        assert!(staging_memory.page_content(1).unwrap().is_none());
        assert!(staging_memory.page_content(2).unwrap().is_none());
        assert!(staging_memory.page_content(3).unwrap().is_some());
        assert!(staging_memory.page_content(4).unwrap().is_some());
    }

    #[test]
    fn clear_range_out_of_range() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        assert!(staging_memory.clear_range(199..200).is_ok());
        match staging_memory.clear_range(199..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    #[test]
    fn first_data_range() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap = create_mmap(1, 2);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        let src_addr = mmap.as_ptr();
        unsafe {
            staging_memory.copy(src_addr, 1, 2).unwrap();
            staging_memory.copy(src_addr, 3, 1).unwrap();
        }

        assert_eq!(staging_memory.first_data_range(200).unwrap(), 1..4);
        assert_eq!(staging_memory.first_data_range(2).unwrap(), 1..3);
        staging_memory.clear_range(1..3).unwrap();
        assert_eq!(staging_memory.first_data_range(2).unwrap(), 3..4);
        staging_memory.clear_range(3..4).unwrap();
        assert!(staging_memory.first_data_range(2).is_none());
    }

    #[test]
    fn get_slice() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap1 = create_mmap(1, 1);
        let mmap2 = create_mmap(2, 1);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        let src_addr1 = mmap1.as_ptr();
        let src_addr2 = mmap2.as_ptr();
        unsafe {
            staging_memory.copy(src_addr1, 1, 1).unwrap().execute();
            staging_memory.copy(src_addr2, 2, 1).unwrap().execute();
        }

        let slice = staging_memory.get_slice(1..3).unwrap();
        assert_eq!(slice.size(), 2 * pagesize());
        for i in 0..pagesize() {
            let mut byte = [0u8; 1];
            slice.get_slice(i, 1).unwrap().copy_to(&mut byte);
            assert_eq!(byte[0], 1);
        }
        for i in pagesize()..2 * pagesize() {
            let mut byte = [0u8; 1];
            slice.get_slice(i, 1).unwrap().copy_to(&mut byte);
            assert_eq!(byte[0], 2);
        }
    }

    #[test]
    fn get_slice_out_of_range() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        match staging_memory.get_slice(200..201) {
            Err(Error::OutOfRange) => {}
            other => {
                unreachable!("unexpected result {:?}", other);
            }
        }
    }

    #[test]
    fn present_pages() {
        let shmem = SharedMemory::new("test staging memory", 200 * pagesize() as u64).unwrap();
        let mmap = create_mmap(1, 5);
        let mut staging_memory = StagingMemory::new(&shmem, 0, 200).unwrap();

        let src_addr = mmap.as_ptr();
        unsafe {
            staging_memory.copy(src_addr, 1, 4).unwrap();
            staging_memory.copy(src_addr, 12, 1).unwrap();
        }

        assert_eq!(staging_memory.present_pages(), 5);
    }
}