// Copyright (C) 2024 Red Hat, Inc.
//
// SPDX-License-Identifier: Apache-2.0

use std::ops::Index;
use std::os::fd::{AsRawFd, BorrowedFd};
use std::sync::atomic::{AtomicU8, Ordering};
use std::sync::{Arc, RwLock};
use std::{io, ptr};
use vm_memory::bitmap::{Bitmap, BitmapSlice, WithBitmapSlice};
use vm_memory::mmap::NewBitmap;
use vm_memory::{Address, GuestMemoryRegion};

// Size in bytes of the `VHOST_LOG_PAGE`
const LOG_PAGE_SIZE: usize = 0x1000;
// Number of bits grouped together as a basic storage unit ("word") in the bitmap
// (i.e., in this case one byte tracks 8 pages, one bit per page).
const LOG_WORD_SIZE: usize = u8::BITS as usize;
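// As an illustrative example of the mapping used below: guest address 0x2345 falls in
// page 2 (0x2345 / 0x1000), which is tracked by word 0 of the log (2 / 8) at bit 2 (2 % 8).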

/// A `Bitmap` with an internal `Bitmap` that can be replaced at runtime
pub trait BitmapReplace: Bitmap {
    type InnerBitmap: MemRegionBitmap;

    /// Replace the internal `Bitmap`
    fn replace(&self, bitmap: Self::InnerBitmap);
}

/// A bitmap relative to a memory region
pub trait MemRegionBitmap: Sized {
    /// Creates a new bitmap relative to `region`, using the `logmem` as
    /// backing memory for the bitmap
    fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self>;
}

// TODO: This impl is a quick and dirty hack to allow the tests to continue using
// `GuestMemoryMmap<()>`. Sadly this is exposed in the public API, but it should
// be moved to an internal mock library.
impl BitmapReplace for () {
    type InnerBitmap = ();

    // this implementation must not be used if the backend sets `VHOST_USER_PROTOCOL_F_LOG_SHMFD`
    fn replace(&self, _bitmap: ()) {
        panic!("The unit bitmap () must not be used if VHOST_USER_PROTOCOL_F_LOG_SHMFD is set");
    }
}

impl MemRegionBitmap for () {
    fn new<R: GuestMemoryRegion>(_region: &R, _logmem: Arc<MmapLogReg>) -> io::Result<Self> {
        Err(io::Error::from(io::ErrorKind::Unsupported))
    }
}

/// `BitmapMmapRegion` implements a bitmap that can be replaced at runtime.
/// The main use case is to support live migration on vhost-user backends
/// (see `VHOST_USER_PROTOCOL_F_LOG_SHMFD` and `VHOST_USER_SET_LOG_BASE` in the vhost-user protocol
/// specification). It uses a fixed memory page size of `VHOST_LOG_PAGE` bytes (i.e., `4096` bytes),
/// so it converts addresses to page numbers before setting or clearing the bits.
///
/// To use this bitmap you need to define the memory as `GuestMemoryMmap<BitmapMmapRegion>`.
///
/// Note:
/// This implementation uses `std::sync::RwLock`; the priority policy of the lock depends on
/// the underlying operating system's implementation and no particular policy is guaranteed,
/// so on systems other than Linux a thread trying to acquire the lock may starve.
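///
/// A minimal usage sketch (illustrative only; assumes the `backend-mmap` feature of
/// `vm-memory` and is not compiled as a doctest):
///
/// ```ignore
/// use vm_memory::{GuestAddress, GuestMemoryMmap};
///
/// // Guest memory whose regions carry a replaceable, memory-mapped dirty-page bitmap.
/// let mem: GuestMemoryMmap<BitmapMmapRegion> =
///     GuestMemoryMmap::from_ranges(&[(GuestAddress(0), 16 * 0x1000)]).unwrap();
/// ```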
#[derive(Default, Debug, Clone)]
pub struct BitmapMmapRegion {
    // TODO: To avoid both reader and writer starvation we can replace the `std::sync::RwLock` with
    // `parking_lot::RwLock`.
    inner: Arc<RwLock<Option<AtomicBitmapMmap>>>,
    base_address: usize, // The slice's base address
}

impl Bitmap for BitmapMmapRegion {
    fn mark_dirty(&self, offset: usize, len: usize) {
        let inner = self.inner.read().unwrap();
        if let Some(bitmap) = inner.as_ref() {
            if let Some(absolute_offset) = self.base_address.checked_add(offset) {
                bitmap.mark_dirty(absolute_offset, len);
            }
        }
    }

    fn dirty_at(&self, offset: usize) -> bool {
        let inner = self.inner.read().unwrap();
        inner
            .as_ref()
            .is_some_and(|bitmap| bitmap.dirty_at(self.base_address.saturating_add(offset)))
    }

    fn slice_at(&self, offset: usize) -> <Self as WithBitmapSlice>::S {
        Self {
            inner: Arc::clone(&self.inner),
            base_address: self.base_address.saturating_add(offset),
        }
    }
}

impl BitmapReplace for BitmapMmapRegion {
    type InnerBitmap = AtomicBitmapMmap;

    fn replace(&self, bitmap: AtomicBitmapMmap) {
        let mut inner = self.inner.write().unwrap();
        inner.replace(bitmap);
    }
}

impl BitmapSlice for BitmapMmapRegion {}

impl<'a> WithBitmapSlice<'a> for BitmapMmapRegion {
    type S = Self;
}

impl NewBitmap for BitmapMmapRegion {
    fn with_len(_len: usize) -> Self {
        Self::default()
    }
}

/// `AtomicBitmapMmap` implements a simple memory-mapped bitmap on the page level with test
/// and set operations. The main use case is to support live migration on vhost-user backends
/// (see `VHOST_USER_PROTOCOL_F_LOG_SHMFD` and `VHOST_USER_SET_LOG_BASE` in the vhost-user protocol
/// specification). It uses a fixed memory page size of `LOG_PAGE_SIZE` bytes, so it converts
/// addresses to page numbers before setting or clearing the bits.
#[derive(Debug)]
pub struct AtomicBitmapMmap {
    logmem: Arc<MmapLogReg>,
    pages_before_region: usize, // Number of pages to ignore from the start of the bitmap
    number_of_pages: usize,     // Number of total pages indexed in the bitmap for this region
}

// `AtomicBitmapMmap` implements a simple bitmap; it is page-size aware and relative
// to a memory region. It handles the `log` memory-mapped area. Each page is indexed
// inside a word of `LOG_WORD_SIZE` bits, so even if the bitmap starts at the beginning of
// the mapped area, the memory region does not necessarily have to start at the beginning of
// that word.
// Note: we don't implement `Bitmap` because we cannot implement `slice_at()`
impl MemRegionBitmap for AtomicBitmapMmap {
    // Creates a new memory-mapped bitmap for the memory region. This bitmap must fit within the
    // log mapped memory.
    fn new<R: GuestMemoryRegion>(region: &R, logmem: Arc<MmapLogReg>) -> io::Result<Self> {
        let region_start_addr: usize = region.start_addr().raw_value().io_try_into()?;
        let region_len: usize = region.len().io_try_into()?;
        if region_len == 0 {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        // The size of the log should be large enough to cover all known guest addresses.
        let region_end_addr = region_start_addr
            .checked_add(region_len - 1)
            .ok_or(io::Error::from(io::ErrorKind::InvalidData))?;
        let region_end_log_word = page_word(page_number(region_end_addr));
        if region_end_log_word >= logmem.len() {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        // The frontend sends a single bitmap (i.e., the log memory to be mapped using `fd`,
        // `mmap_offset` and `mmap_size`) that covers the entire guest memory.
        // However, since each memory region requires a bitmap relative to it, we have to
        // adjust the offset and size, in number of pages, of this region.
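        // As an illustrative example (hypothetical values): a region starting at guest
        // address 0xE000 with a length of 0x2000 covers pages 14 and 15 of the shared log,
        // so `offset_pages` would be 14 and `size_page` would be 2.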
        let offset_pages = page_number(region_start_addr);
        let size_page = page_number(region_len);

        Ok(Self {
            logmem,
            pages_before_region: offset_pages,
            number_of_pages: size_page,
        })
    }
}

impl AtomicBitmapMmap {
    // Sets the memory range as dirty. The `offset` is relative to the memory region,
    // so an offset of `0` references the start of the memory region. Any attempt to
    // access beyond the end of the bitmap is simply ignored.
    fn mark_dirty(&self, offset: usize, len: usize) {
        if len == 0 {
            return;
        }

        let first_page = page_number(offset);
        let last_page = page_number(offset.saturating_add(len - 1));
        for page in first_page..=last_page {
            if page >= self.number_of_pages {
                break; // ignore out-of-bounds access
            }

            // get the absolute page number
            let page = self.pages_before_region + page;
            self.logmem[page_word(page)].fetch_or(1 << page_bit(page), Ordering::Relaxed);
        }
    }

    // Checks whether the specified offset is marked as dirty. The `offset` is relative
    // to the memory region, so a `0` offset references the start of the memory region.
    // Any attempt to access beyond the end of the bitmap is simply ignored.
    fn dirty_at(&self, offset: usize) -> bool {
        let page = page_number(offset);
        if page >= self.number_of_pages {
            return false; // ignore out-of-bounds access
        }

        // get the absolute page number
        let page = self.pages_before_region + page;
        let page_bit = self.logmem[page_word(page)].load(Ordering::Relaxed) & (1 << page_bit(page));
        page_bit != 0
    }
}

/// `MmapLogReg` mmaps the frontend bitmap backing memory in the current process.
#[derive(Debug)]
pub struct MmapLogReg {
    addr: *const AtomicU8,
    len: usize,
}

// SAFETY: Send is not automatically implemented because of the raw pointer.
// No one besides `MmapLogReg` has the raw pointer, so we can safely transfer it to another thread.
unsafe impl Send for MmapLogReg {}

// SAFETY: Sync is not automatically implemented because of the raw pointer.
// `MmapLogReg` doesn't have any interior mutability and all accesses to `&AtomicU8`
// are done through atomic operations.
unsafe impl Sync for MmapLogReg {}

impl MmapLogReg {
    // Note: We could try to adjust the mapping area to only cover the memory region, but
    // the region's starting address is not guaranteed to be LOG_WORD_SIZE-page aligned,
    // which makes the implementation needlessly cumbersome.
    // Note: The specification does not define whether the offset must be page-aligned or not.
    // But, since we are receiving the offset from the frontend to be used to call mmap,
    // we assume it is properly aligned (currently, qemu always sends a 0 offset).
    pub(crate) fn from_file(fd: BorrowedFd, offset: u64, len: u64) -> io::Result<Self> {
        let offset: isize = offset.io_try_into()?;
        let len: usize = len.io_try_into()?;

        // Let's uphold the safety contract for `std::ptr::offset()`.
        if len > isize::MAX as usize {
            return Err(io::Error::from(io::ErrorKind::InvalidData));
        }

        // SAFETY: `fd` is a valid file descriptor and we are not using `libc::MAP_FIXED`.
        let addr = unsafe {
            libc::mmap(
                ptr::null_mut(),
                len as libc::size_t,
                libc::PROT_READ | libc::PROT_WRITE,
                libc::MAP_SHARED,
                fd.as_raw_fd(),
                offset as libc::off_t,
            )
        };

        if addr == libc::MAP_FAILED {
            return Err(io::Error::last_os_error());
        }

        Ok(Self {
            addr: addr as *const AtomicU8,
            len,
        })
    }

    fn len(&self) -> usize {
        self.len
    }
}

impl Index<usize> for MmapLogReg {
    type Output = AtomicU8;

    // It's ok to get a reference to an atomic value.
    fn index(&self, index: usize) -> &Self::Output {
        assert!(index < self.len);
        // Note: Instead of `&*` we can use `AtomicU8::from_ptr()` as soon as it gets stabilized.
        // SAFETY: `self.addr` is a valid and properly aligned pointer. Also, `self.addr` + `index`
        // doesn't wrap around and is contained within the mapped memory region.
        unsafe { &*self.addr.add(index) }
    }
}

impl Drop for MmapLogReg {
    fn drop(&mut self) {
        // SAFETY: `addr` is properly aligned, and we are sure that this is the
        // last reference alive and/or we have exclusive access to this object.
        unsafe {
            libc::munmap(self.addr as *mut libc::c_void, self.len as libc::size_t);
        }
    }
}

trait IoTryInto<T: TryFrom<Self>>: Sized {
    fn io_try_into(self) -> io::Result<T>;
}

impl<TySrc, TyDst> IoTryInto<TyDst> for TySrc
where
    TyDst: TryFrom<TySrc>,
    <TyDst as TryFrom<TySrc>>::Error: Send + Sync + std::error::Error + 'static,
{
    fn io_try_into(self) -> io::Result<TyDst> {
        self.try_into()
            .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))
    }
}
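
// For example (a hypothetical conversion mirroring the calls above): turning a `u64`
// length received from the frontend into a `usize`, surfacing any overflow as an
// `io::Error` of kind `InvalidData` instead of a `TryFromIntError`:
//
//     let len: usize = some_u64_len.io_try_into()?;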

#[inline]
// Get the page number corresponding to the address `addr`
fn page_number(addr: usize) -> usize {
    addr / LOG_PAGE_SIZE
}

#[inline]
// Get the word within the bitmap of the page.
// Each page is indexed inside a word of `LOG_WORD_SIZE` bits.
fn page_word(page: usize) -> usize {
    page / LOG_WORD_SIZE
}

#[inline]
// Get the bit index inside a word of `LOG_WORD_SIZE` bits
fn page_bit(page: usize) -> usize {
    page % LOG_WORD_SIZE
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::fs::File;
    use std::io::Write;
    use std::os::fd::AsFd;
    use vm_memory::{GuestAddress, GuestRegionMmap};
    use vmm_sys_util::tempfile::TempFile;

    // Helper method to check whether a specified range is clean.
    pub fn range_is_clean<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| !b.dirty_at(offset))
    }

    // Helper method to check whether a specified range is dirty.
    pub fn range_is_dirty<B: Bitmap>(b: &B, start: usize, len: usize) -> bool {
        (start..start + len).all(|offset| b.dirty_at(offset))
    }

    fn tmp_file(len: usize) -> File {
        let mut f = TempFile::new().unwrap().into_file();
        let buf = vec![0; len];
        f.write_all(buf.as_ref()).unwrap();
        f
    }

    fn test_all(b: &BitmapMmapRegion, len: usize) {
        assert!(range_is_clean(b, 0, len), "The bitmap should be clean");

        b.mark_dirty(0, len);
        assert!(range_is_dirty(b, 0, len), "The bitmap should be dirty");
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_region_bigger_than_log() {
        // Let's create a log memory area to track 8 pages;
        // since 1 bit corresponds to 1 page, we need a 1-byte log memory area.
        let mmap_offset: u64 = 0;
        let mmap_size = 1; // 1 byte = 8 bits/pages
        let f = tmp_file(mmap_size);

        // A guest memory region of 16 pages
        let region_start_addr = GuestAddress(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 16;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);

        assert!(log.is_err());
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_log_and_region_same_size() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        // A 32-page guest memory region
        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 32;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_region_smaller_than_log() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        // A 16-page guest memory region
        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 16;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();

        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_region_smaller_than_one_word() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        // A 6-page guest memory region
        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 6;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        test_all(&bitmap, region_len);
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_two_regions_overlapping_word_first_dirty() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        // An 11-page guest memory region
        let region0_start_addr = GuestAddress::new(mmap_offset);
        let region0_len = LOG_PAGE_SIZE * 11;
        let region0: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();

        let log0 = AtomicBitmapMmap::new(&region0, Arc::clone(&logmem));
        assert!(log0.is_ok());
        let log0 = log0.unwrap();
        let bitmap0 = BitmapMmapRegion::default();
        bitmap0.replace(log0);

        // A 1-page guest memory region
        let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
        let region1_len = LOG_PAGE_SIZE;
        let region1: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();

        let log1 = AtomicBitmapMmap::new(&region1, Arc::clone(&logmem));
        assert!(log1.is_ok());
        let log1 = log1.unwrap();

        let bitmap1 = BitmapMmapRegion::default();
        bitmap1.replace(log1);

        // Both regions should be clean
        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );

        // After marking region 0 dirty, region 1 should remain clean
        bitmap0.mark_dirty(0, region0_len);

        assert!(
            range_is_dirty(&bitmap0, 0, region0_len),
            "The bitmap0 should be dirty"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_two_regions_overlapping_word_second_dirty() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        // An 11-page guest memory region
        let region0_start_addr = GuestAddress::new(mmap_offset);
        let region0_len = LOG_PAGE_SIZE * 11;
        let region0: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region0_start_addr, region0_len, None).unwrap();

        let log0 = AtomicBitmapMmap::new(&region0, Arc::clone(&logmem));
        assert!(log0.is_ok());
        let log0 = log0.unwrap();

        let bitmap0 = BitmapMmapRegion::default();
        bitmap0.replace(log0);

        // A 1-page guest memory region
        let region1_start_addr = GuestAddress::new(mmap_offset + LOG_PAGE_SIZE as u64 * 14);
        let region1_len = LOG_PAGE_SIZE;
        let region1: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region1_start_addr, region1_len, None).unwrap();

        let log1 = AtomicBitmapMmap::new(&region1, Arc::clone(&logmem));
        assert!(log1.is_ok());
        let log1 = log1.unwrap();

        let bitmap1 = BitmapMmapRegion::default();
        bitmap1.replace(log1);

        // Both regions should be clean
        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
        assert!(
            range_is_clean(&bitmap1, 0, region1_len),
            "The bitmap1 should be clean"
        );

        // After marking region 1 dirty, region 0 should remain clean
        bitmap1.mark_dirty(0, region1_len);

        assert!(
            range_is_dirty(&bitmap1, 0, region1_len),
            "The bitmap1 should be dirty"
        );
        assert!(
            range_is_clean(&bitmap0, 0, region0_len),
            "The bitmap0 should be clean"
        );
    }

    #[test]
    #[cfg(not(miri))] // Miri cannot mmap files
    fn test_bitmap_region_slice() {
        // A log memory area able to track 32 pages
        let mmap_offset: u64 = 0;
        let mmap_size = 4; // 4 bytes * 8 bits = 32 bits/pages
        let f = tmp_file(mmap_size);

        // A 32-page guest memory region
        let region_start_addr = GuestAddress::new(mmap_offset);
        let region_len = LOG_PAGE_SIZE * 32;
        let region: GuestRegionMmap<()> =
            GuestRegionMmap::from_range(region_start_addr, region_len, None).unwrap();

        let logmem =
            Arc::new(MmapLogReg::from_file(f.as_fd(), mmap_offset, mmap_size as u64).unwrap());

        let log = AtomicBitmapMmap::new(&region, logmem);
        assert!(log.is_ok());
        let log = log.unwrap();

        let bitmap = BitmapMmapRegion::default();
        bitmap.replace(log);

        assert!(
            range_is_clean(&bitmap, 0, region_len),
            "The bitmap should be clean"
        );

        // Let's get a slice covering the second half of the bitmap
        let slice_len = region_len / 2;
        let slice = bitmap.slice_at(slice_len);
        assert!(
            range_is_clean(&slice, 0, slice_len),
            "The slice should be clean"
        );

        slice.mark_dirty(0, slice_len);
        assert!(
            range_is_dirty(&slice, 0, slice_len),
            "The slice should be dirty"
        );
        assert!(
            range_is_clean(&bitmap, 0, slice_len),
            "The first half of the bitmap should be clean"
        );
        assert!(
            range_is_dirty(&bitmap, slice_len, region_len - slice_len),
            "The last half of the bitmap should be dirty"
        );
    }
}