// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![deny(missing_docs)]

use std::fs::File;
use std::ops::Range;
use std::os::unix::fs::FileExt;

use base::error;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MemoryMappingUnix;
use base::MmapError;
use base::Protection;
use base::PunchHole;
use data_model::VolatileMemory;
use data_model::VolatileMemoryError;
use data_model::VolatileSlice;
use thiserror::Error as ThisError;

use crate::pagesize::bytes_to_pages;
use crate::pagesize::is_page_aligned;
use crate::pagesize::pages_to_bytes;
use crate::present_list::PresentList;

/// Result type for [SwapFile] operations.
pub type Result<T> = std::result::Result<T, Error>;

/// Errors returned by [SwapFile] operations.
#[derive(ThisError, Debug)]
pub enum Error {
    #[error("io failed: {0}")]
    Io(#[from] std::io::Error),
    #[error("mmap operation failed ({0}): {1}")]
    Mmap(&'static str, MmapError),
    #[error("volatile memory operation failed: {0}")]
    VolatileMemory(#[from] VolatileMemoryError),
    #[error("index is out of range")]
    OutOfRange,
    #[error("data size is invalid")]
    InvalidSize,
}

/// [SwapFile] stores the active pages of a single memory region in a swap file.
///
/// The swap file is shared with other regions; each [SwapFile] mmaps the range of the file
/// corresponding to its own region.
///
/// TODO(kawasin): The file structure is straightforward and is not optimized yet.
/// Each page in the file corresponds to a page in the memory region.
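///
/// # Example
///
/// A minimal usage sketch, adapted from the unit tests at the bottom of this file (error
/// handling elided; marked `ignore` so it is illustrative only and not run as a doctest):
///
/// ```ignore
/// let file = tempfile::tempfile()?;
/// // A region of 200 pages starting at page offset 0 of the swap file.
/// let mut swap_file = SwapFile::new(&file, 0, 200)?;
/// // Write one page of data and read it back.
/// swap_file.write_to_file(0, &vec![1u8; pagesize()])?;
/// assert!(swap_file.page_content(0)?.is_some());
/// ```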
#[derive(Debug)]
pub struct SwapFile<'a> {
    file: &'a File,
    offset: u64,
    file_mmap: MemoryMapping,
    // Tracks which pages are present, indexed by page index within the memory region.
    present_list: PresentList,
    // All the data pages before this index are mlock(2)ed.
    cursor_mlock: usize,
}

impl<'a> SwapFile<'a> {
    /// Creates an initialized [SwapFile] for a memory region.
    ///
    /// All the pages are marked as empty at first.
    ///
    /// # Arguments
    ///
    /// * `file` - The swap file.
    /// * `offset_pages` - The starting offset in pages of the region in the swap file.
    /// * `num_of_pages` - The number of pages in the region.
    pub fn new(file: &'a File, offset_pages: usize, num_of_pages: usize) -> Result<Self> {
        let offset = pages_to_bytes(offset_pages) as u64;
        let file_mmap = MemoryMappingBuilder::new(pages_to_bytes(num_of_pages))
            .from_file(file)
            .offset(offset)
            .protection(Protection::read())
            .build()
            .map_err(|e| Error::Mmap("create", e))?;
        Ok(Self {
            file,
            offset,
            file_mmap,
            present_list: PresentList::new(num_of_pages),
            cursor_mlock: 0,
        })
    }

    /// Returns the byte offset of the head of this region in the swap file.
    pub(crate) fn base_offset(&self) -> u64 {
        self.offset
    }

    /// Returns the total count of managed pages.
    pub fn num_pages(&self) -> usize {
        self.present_list.len()
    }

    /// Returns the content of the page corresponding to the index.
    ///
    /// Returns [Option::None] if the page has no content in the file.
    ///
    /// Returns [Error::OutOfRange] if the `idx` is out of range.
    ///
    /// # Arguments
    ///
    /// * `idx` - the index of the page from the head of the pages.
    pub fn page_content(&self, idx: usize) -> Result<Option<VolatileSlice>> {
        match self.present_list.get(idx) {
            Some(is_present) => {
                if *is_present {
                    Ok(Some(self.get_slice(idx..idx + 1)?))
                } else {
                    Ok(None)
                }
            }
            None => Err(Error::OutOfRange),
        }
    }

    /// Starts reading ahead the swap file into the page cache from the head.
    ///
    /// This also `mlock2(2)`s the pages so that they are not dropped again after being populated.
    /// This does not block the calling thread on I/O because:
    ///
    /// * `mlock2(2)` is executed with `MLOCK_ONFAULT`.
    /// * `MADV_WILLNEED` is the same as `readahead(2)`, which triggers readahead in the
    ///   background.
    ///   * However, Linux has a bug where `readahead(2)` (and also `MADV_WILLNEED`) may block
    ///     while reading filesystem metadata.
    ///
    /// This returns the number of consecutive pages which are newly mlock(2)ed. Returning `0`
    /// means that there is no more data to be mlock(2)ed in this file.
    ///
    /// The caller must track the number of mlock(2)ed pages so as not to mlock(2) more pages than
    /// `RLIMIT_MEMLOCK` if it does not have `CAP_IPC_LOCK`.
    ///
    /// # Arguments
    ///
    /// * `max_pages` - The maximum number of pages to be mlock(2)ed at once.
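    ///
    /// # Example
    ///
    /// A sketch of a caller loop, mirroring the `lock_and_start_populate` unit test below
    /// (`swap_file` is assumed to have some pages written; not run as a doctest):
    ///
    /// ```ignore
    /// let mut locked_pages = 0;
    /// loop {
    ///     // mlock(2) and prefetch at most 2 pages per iteration to respect RLIMIT_MEMLOCK.
    ///     let pages = swap_file.lock_and_async_prefetch(2)?;
    ///     if pages == 0 {
    ///         // No more present pages to mlock(2).
    ///         break;
    ///     }
    ///     locked_pages += pages;
    /// }
    /// ```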
    pub fn lock_and_async_prefetch(&mut self, max_pages: usize) -> Result<usize> {
        match self
            .present_list
            .find_data_range(self.cursor_mlock, max_pages)
        {
            Some(idx_range) => {
                let pages = idx_range.end - idx_range.start;
                let mem_offset = pages_to_bytes(idx_range.start);
                let size_in_bytes = pages_to_bytes(pages);
                self.file_mmap
                    .lock_on_fault(mem_offset, size_in_bytes)
                    .map_err(|e| Error::Mmap("mlock", e))?;
                self.file_mmap
                    .async_prefetch(mem_offset, size_in_bytes)
                    .map_err(|e| Error::Mmap("madvise willneed", e))?;
                self.cursor_mlock = idx_range.end;
                Ok(pages)
            }
            None => {
                self.cursor_mlock = self.present_list.len();
                Ok(0)
            }
        }
    }

    /// Marks the pages in the file corresponding to the index range as cleared.
    ///
    /// The contents on the swap file are preserved and can be reused by
    /// `SwapFile::mark_as_present()`, which reduces disk I/O.
    ///
    /// If the pages are mlock(2)ed, this unlocks them before `MADV_DONTNEED`. This returns the
    /// number of pages munlock(2)ed.
    ///
    /// # Arguments
    ///
    /// * `idx_range` - The indices of consecutive pages to be cleared. All the pages must be
    ///   present.
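    ///
    /// # Example
    ///
    /// A sketch of the munlock(2) accounting, mirroring the `clear_range_unlocked_pages` unit
    /// test below (assumes pages `1..11` are present and `1..6` are mlock(2)ed; not run as a
    /// doctest):
    ///
    /// ```ignore
    /// // All of 1..4 is locked, so 3 pages are munlock(2)ed.
    /// assert_eq!(swap_file.clear_range(1..4)?, 3);
    /// // 4..7 straddles the mlock cursor: only 4..6 is munlock(2)ed.
    /// assert_eq!(swap_file.clear_range(4..7)?, 2);
    /// ```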
    pub fn clear_range(&mut self, idx_range: Range<usize>) -> Result<usize> {
        if self.present_list.clear_range(idx_range.clone()) {
            let offset = pages_to_bytes(idx_range.start);
            let munlocked_size = if idx_range.start < self.cursor_mlock {
                // idx_range is validated at clear_range() and self.cursor_mlock is within the
                // mmap.
                let pages = idx_range.end.min(self.cursor_mlock) - idx_range.start;
                // munlock(2) first because MADV_DONTNEED fails for mlock(2)ed pages.
                self.file_mmap
                    .unlock(offset, pages_to_bytes(pages))
                    .map_err(|e| Error::Mmap("munlock", e))?;
                pages
            } else {
                0
            };
            // offset and size are validated at clear_range().
            let size = pages_to_bytes(idx_range.end - idx_range.start);
            // The page cache is cleared without writing pages back to file even if they are dirty.
            // The disk contents which may not be the latest are kept for later trim optimization.
            self.file_mmap
                .drop_page_cache(offset, size)
                .map_err(|e| Error::Mmap("madvise dontneed", e))?;
            Ok(munlocked_size)
        } else {
            Err(Error::OutOfRange)
        }
    }

    /// Erases the pages corresponding to the given range from the file and the underlying disk.
    ///
    /// If the pages are mlock(2)ed, this unlocks them before punching a hole. This returns the
    /// number of pages munlock(2)ed.
    ///
    /// # Arguments
    ///
    /// * `idx_range` - The indices of consecutive pages to be erased. This may contain
    ///   non-present pages.
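    ///
    /// # Example
    ///
    /// A sketch adapted from the `erase_from_disk` unit test below (not run as a doctest):
    ///
    /// ```ignore
    /// swap_file.write_to_file(0, &vec![1u8; pagesize()])?;
    /// swap_file.erase_from_disk(0..1)?;
    /// // The page is no longer present and the punched hole reads back as zeros.
    /// assert!(swap_file.page_content(0)?.is_none());
    /// ```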
    pub fn erase_from_disk(&mut self, idx_range: Range<usize>) -> Result<usize> {
        let (mlock_range, mlocked_pages) = if idx_range.start < self.cursor_mlock {
            let mlock_range = idx_range.start..idx_range.end.min(self.cursor_mlock);
            let mlocked_pages = self
                .present_list
                .present_pages(mlock_range.clone())
                .ok_or(Error::OutOfRange)?;
            (Some(mlock_range), mlocked_pages)
        } else {
            (None, 0)
        };
        if self.present_list.clear_range(idx_range.clone()) {
            if let Some(mlock_range) = mlock_range {
                // mlock_range is validated at present_pages().
                // mlock_range may contain non-locked pages. munlock(2) succeeds even in that case.
                self.file_mmap
                    .unlock(
                        pages_to_bytes(mlock_range.start),
                        pages_to_bytes(mlock_range.end - mlock_range.start),
                    )
                    .map_err(|e| Error::Mmap("munlock", e))?;
            }
            let file_offset = self.offset + pages_to_bytes(idx_range.start) as u64;
            self.file.punch_hole(
                file_offset,
                pages_to_bytes(idx_range.end - idx_range.start) as u64,
            )?;
            Ok(mlocked_pages)
        } else {
            Err(Error::OutOfRange)
        }
    }

    /// munlock(2)s the pages if there are mlock(2)ed pages in the mmap and resets the internal
    /// cursor for mlock(2) tracking.
    pub fn clear_mlock(&mut self) -> Result<()> {
        if self.cursor_mlock > 0 {
            // cursor_mlock is not `0` only when disabling vmm-swap was aborted by a subsequent
            // vmm-swap enable. munlock(2)ing all the possibly locked pages is not a problem
            // because this is not a hot path.
            self.file_mmap
                .unlock(0, pages_to_bytes(self.cursor_mlock))
                .map_err(|e| Error::Mmap("munlock", e))?;
        }
        self.cursor_mlock = 0;
        Ok(())
    }

    /// Marks the page as present on the file.
    ///
    /// The content written to the swap file by a previous `SwapFile::write_to_file()` is reused.
    ///
    /// # Arguments
    ///
    /// * `idx` - the index of the page from the head of the pages.
    pub fn mark_as_present(&mut self, idx: usize) {
        self.present_list.mark_as_present(idx..idx + 1);
    }

    /// Writes the contents to the swap file.
    ///
    /// # Arguments
    ///
    /// * `idx` - the index of the head page of the content from the head of the pages.
    /// * `mem_slice` - the page content(s). This can be more than one page. The size must be
    ///   aligned with the page size.
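    ///
    /// # Example
    ///
    /// A sketch adapted from the unit tests below; the slice length must be page-aligned (not
    /// run as a doctest):
    ///
    /// ```ignore
    /// // Write 2 consecutive pages of content starting at page index 2.
    /// swap_file.write_to_file(2, &vec![2u8; 2 * pagesize()])?;
    /// // A non-page-aligned slice is rejected.
    /// assert!(swap_file.write_to_file(0, &vec![1u8; pagesize() + 1]).is_err());
    /// ```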
    pub fn write_to_file(&mut self, idx: usize, mem_slice: &[u8]) -> Result<()> {
        // Validate the arguments.
        if !is_page_aligned(mem_slice.len()) {
            // The mem_slice size must be aligned with the page size.
            return Err(Error::InvalidSize);
        }
        let num_pages = bytes_to_pages(mem_slice.len());
        if idx + num_pages > self.present_list.len() {
            return Err(Error::OutOfRange);
        }

        // Write with the pwrite(2) syscall instead of copying the contents into the mmap because
        // the write syscall tells the kernel explicitly how many pages are going to be written,
        // while the mmap learns about each page only on a page fault.
        self.file
            .write_all_at(mem_slice, self.offset + pages_to_bytes(idx) as u64)?;

        if !self.present_list.mark_as_present(idx..idx + num_pages) {
            // The range is already validated before writing.
            unreachable!("idx range is out of range");
        }

        Ok(())
    }

    /// Returns the first range of indices of consecutive pages present in the swap file.
    ///
    /// # Arguments
    ///
    /// * `max_pages` - the maximum length of the returned range, even if the run of consecutive
    ///   present pages is longer than this.
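    ///
    /// # Example
    ///
    /// A sketch of draining all present pages in bounded chunks, combining this with
    /// [SwapFile::get_slice] and [SwapFile::clear_range] (illustrative only; not run as a
    /// doctest):
    ///
    /// ```ignore
    /// while let Some(idx_range) = swap_file.first_data_range(16) {
    ///     {
    ///         let slice = swap_file.get_slice(idx_range.clone())?;
    ///         // ... consume `slice` ...
    ///     }
    ///     swap_file.clear_range(idx_range)?;
    /// }
    /// ```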
    pub fn first_data_range(&mut self, max_pages: usize) -> Option<Range<usize>> {
        self.present_list.first_data_range(max_pages)
    }

    /// Returns the [VolatileSlice] corresponding to the indices regardless of whether the pages
    /// are present or not.
    ///
    /// If the range is out of the region, this returns [Error::OutOfRange].
    ///
    /// # Arguments
    ///
    /// * `idx_range` - the indices of the pages.
    pub fn get_slice(&self, idx_range: Range<usize>) -> Result<VolatileSlice> {
        match self.file_mmap.get_slice(
            pages_to_bytes(idx_range.start),
            pages_to_bytes(idx_range.end - idx_range.start),
        ) {
            Ok(slice) => Ok(slice),
            Err(VolatileMemoryError::OutOfBounds { .. }) => Err(Error::OutOfRange),
            Err(e) => Err(e.into()),
        }
    }

    /// Returns the count of present pages in the swap file.
    pub fn present_pages(&self) -> usize {
        self.present_list.all_present_pages()
    }
}

#[cfg(test)]
mod tests {
    use std::slice;

    use base::pagesize;

    use super::*;

    #[test]
    fn new_success() {
        let file = tempfile::tempfile().unwrap();

        assert!(SwapFile::new(&file, 0, 200).is_ok());
    }

    #[test]
    fn len() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 0, 200).unwrap();

        assert_eq!(swap_file.num_pages(), 200);
    }

    #[test]
    fn page_content_default_is_none() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 0, 200).unwrap();

        assert!(swap_file.page_content(0).unwrap().is_none());
    }

    #[test]
    fn page_content_returns_content() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();

        let page = swap_file.page_content(0).unwrap().unwrap();
        let result = unsafe { slice::from_raw_parts(page.as_ptr() as *const u8, pagesize()) };
        assert_eq!(result, data);
    }

    #[test]
    fn page_content_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 0, 200).unwrap();

        assert!(swap_file.page_content(199).is_ok());
        match swap_file.page_content(200) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        }
    }

    fn assert_page_content(swap_file: &SwapFile, idx: usize, data: &[u8]) {
        let page = swap_file.page_content(idx).unwrap().unwrap();
        let result = unsafe { slice::from_raw_parts(page.as_ptr() as *const u8, pagesize()) };
        assert_eq!(result, data);
    }

    #[test]
    fn write_to_file_swap_file() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let buf1 = &vec![1; pagesize()];
        let buf2 = &vec![2; 2 * pagesize()];
        swap_file.write_to_file(0, buf1).unwrap();
        swap_file.write_to_file(2, buf2).unwrap();

        // page_content()
        assert_page_content(&swap_file, 0, buf1);
        assert_page_content(&swap_file, 2, &buf2[0..pagesize()]);
        assert_page_content(&swap_file, 3, &buf2[pagesize()..2 * pagesize()]);
    }

    #[test]
    fn write_to_file_no_conflict() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file1 = SwapFile::new(&file, 0, 2).unwrap();
        let mut swap_file2 = SwapFile::new(&file, 2, 2).unwrap();

        let buf1 = &vec![1; pagesize()];
        let buf2 = &vec![2; pagesize()];
        let buf3 = &vec![3; pagesize()];
        let buf4 = &vec![4; pagesize()];
        swap_file1.write_to_file(0, buf1).unwrap();
        swap_file1.write_to_file(1, buf2).unwrap();
        swap_file2.write_to_file(0, buf3).unwrap();
        swap_file2.write_to_file(1, buf4).unwrap();

        assert_page_content(&swap_file1, 0, buf1);
        assert_page_content(&swap_file1, 1, buf2);
        assert_page_content(&swap_file2, 0, buf3);
        assert_page_content(&swap_file2, 1, buf4);
    }

    #[test]
    fn write_to_file_invalid_size() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let buf = &vec![1; pagesize() + 1];
        match swap_file.write_to_file(0, buf) {
            Err(Error::InvalidSize) => {}
            _ => unreachable!("not invalid size"),
        };
    }

    #[test]
    fn write_to_file_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let buf1 = &vec![1; pagesize()];
        let buf2 = &vec![2; 2 * pagesize()];
        match swap_file.write_to_file(200, buf1) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        match swap_file.write_to_file(199, buf2) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    #[test]
    #[cfg(target_arch = "x86_64")] // TODO(b/272612118): unit test infra (qemu-user) support
    fn lock_and_start_populate() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file
            .write_to_file(3, &vec![1; 5 * pagesize()])
            .unwrap();
        swap_file.write_to_file(10, &vec![1; pagesize()]).unwrap();

        let mut locked_pages = 0;
        loop {
            let pages = swap_file.lock_and_async_prefetch(2).unwrap();
            if pages == 0 {
                break;
            }
            assert!(pages <= 2);
            locked_pages += pages;
        }
        assert_eq!(locked_pages, 7);
    }

    #[test]
    fn clear_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.clear_range(0..1).unwrap();

        assert!(swap_file.page_content(0).unwrap().is_none());
    }

    #[test]
    #[cfg(target_arch = "x86_64")] // TODO(b/272612118): unit test infra (qemu-user) support
    fn clear_range_unlocked_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        // 1..6 is locked, 6..11 is not locked.
        assert_eq!(swap_file.lock_and_async_prefetch(5).unwrap(), 5);

        // locked pages only
        assert_eq!(swap_file.clear_range(1..4).unwrap(), 3);
        // locked pages + non-locked pages
        assert_eq!(swap_file.clear_range(4..7).unwrap(), 2);
        // non-locked pages
        assert_eq!(swap_file.clear_range(10..11).unwrap(), 0);
    }

    #[test]
    fn clear_range_keep_on_disk() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.clear_range(0..1).unwrap();

        let slice = swap_file.get_slice(0..1).unwrap();
        let slice = unsafe { slice::from_raw_parts(slice.as_ptr(), slice.size()) };
        assert_eq!(slice, data);
    }

    #[test]
    fn clear_range_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        assert!(swap_file.clear_range(199..200).is_ok());
        match swap_file.clear_range(200..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        match swap_file.clear_range(199..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    #[test]
    fn erase_from_disk() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        let data = &vec![1; pagesize()];
        swap_file.write_to_file(0, data).unwrap();
        swap_file.erase_from_disk(0..1).unwrap();

        assert!(swap_file.page_content(0).unwrap().is_none());
        let slice = swap_file.get_slice(0..1).unwrap();
        let slice = unsafe { slice::from_raw_parts(slice.as_ptr(), slice.size()) };
        assert_eq!(slice, &vec![0; pagesize()]);
    }

    #[test]
    #[cfg(target_arch = "x86_64")] // TODO(b/272612118): unit test infra (qemu-user) support
    fn erase_from_disk_unlocked_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        // 1..6 is locked, 6..11 is not locked.
        assert_eq!(swap_file.lock_and_async_prefetch(5).unwrap(), 5);

        // empty pages
        assert_eq!(swap_file.erase_from_disk(0..1).unwrap(), 0);
        // empty pages + locked pages
        assert_eq!(swap_file.erase_from_disk(0..2).unwrap(), 1);
        // locked pages only
        assert_eq!(swap_file.erase_from_disk(2..4).unwrap(), 2);
        // empty pages + locked pages + non-locked pages
        assert_eq!(swap_file.erase_from_disk(3..7).unwrap(), 2);
        // non-locked pages
        assert_eq!(swap_file.erase_from_disk(10..11).unwrap(), 0);
    }

    #[test]
    fn erase_from_disk_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        assert!(swap_file.erase_from_disk(199..200).is_ok());
        match swap_file.erase_from_disk(200..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
        match swap_file.erase_from_disk(199..201) {
            Err(Error::OutOfRange) => {}
            _ => unreachable!("not out of range"),
        };
    }

    #[test]
    #[cfg(target_arch = "x86_64")] // TODO(b/272612118): unit test infra (qemu-user) support
    fn clear_mlock() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file
            .write_to_file(1, &vec![1; 10 * pagesize()])
            .unwrap();
        // Succeeds when no pages are mlock(2)ed.
        assert!(swap_file.clear_mlock().is_ok());

        assert_eq!(swap_file.lock_and_async_prefetch(11).unwrap(), 10);
        // Succeeds when there is an mlock(2)ed area.
        assert!(swap_file.clear_mlock().is_ok());

        // The mlock cursor was reset, so the pages can be mlock(2)ed again.
        assert_eq!(swap_file.lock_and_async_prefetch(11).unwrap(), 10);
    }

    #[test]
    fn first_data_range() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file
            .write_to_file(1, &vec![1; 2 * pagesize()])
            .unwrap();
        swap_file.write_to_file(3, &vec![2; pagesize()]).unwrap();

        assert_eq!(swap_file.first_data_range(200).unwrap(), 1..4);
        assert_eq!(swap_file.first_data_range(2).unwrap(), 1..3);
        swap_file.clear_range(1..3).unwrap();
        assert_eq!(swap_file.first_data_range(2).unwrap(), 3..4);
        swap_file.clear_range(3..4).unwrap();
        assert!(swap_file.first_data_range(2).is_none());
    }

    #[test]
    fn get_slice() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file.write_to_file(2, &vec![2; pagesize()]).unwrap();

        let slice = swap_file.get_slice(1..3).unwrap();
        assert_eq!(slice.size(), 2 * pagesize());
        for i in 0..pagesize() {
            let mut byte = [0u8; 1];
            slice.get_slice(i, 1).unwrap().copy_to(&mut byte);
            assert_eq!(byte[0], 1);
        }
        for i in pagesize()..2 * pagesize() {
            let mut byte = [0u8; 1];
            slice.get_slice(i, 1).unwrap().copy_to(&mut byte);
            assert_eq!(byte[0], 2);
        }
    }

    #[test]
    fn get_slice_out_of_range() {
        let file = tempfile::tempfile().unwrap();
        let swap_file = SwapFile::new(&file, 0, 200).unwrap();

        match swap_file.get_slice(200..201) {
            Err(Error::OutOfRange) => {}
            other => {
                unreachable!("unexpected result {:?}", other);
            }
        }
    }

    #[test]
    fn present_pages() {
        let file = tempfile::tempfile().unwrap();
        let mut swap_file = SwapFile::new(&file, 0, 200).unwrap();

        swap_file.write_to_file(1, &vec![1; pagesize()]).unwrap();
        swap_file.write_to_file(2, &vec![2; pagesize()]).unwrap();

        assert_eq!(swap_file.present_pages(), 2);
    }
}