// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ffi::c_void;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::sync::Arc;

use base::errno_result;
use base::error;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::MappedRegion;
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use fnv::FnvHashMap;
use libc::E2BIG;
use libc::EEXIST;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::ENOTSUP;
use libc::EOVERFLOW;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[cfg(windows)]
use win_util::win32_wide_string;

use super::*;
use crate::host_phys_addr_bits;
use crate::ClockState;
use crate::Datamatch;
use crate::DeviceKind;
use crate::Hypervisor;
use crate::HypervisorKind;
use crate::IoEventAddress;
use crate::MemCacheType;
use crate::MemSlot;
use crate::VcpuX86_64;
use crate::Vm;
use crate::VmCap;
use crate::VmX86_64;

/// A wrapper around creating and using a HAXM VM.
pub struct HaxmVm {
    haxm: Haxm,
    vm_id: u32,
    descriptor: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (GuestAddress, Box<dyn MappedRegion>)>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    // HAXM's implementation of ioevents makes several assumptions about how crosvm uses ioevents:
    //   1. All ioevents are registered during device setup, and thus can be cloned when the vm is
    //      cloned instead of locked in an Arc<Mutex<>>. This will make handling ioevents in each
    //      vcpu thread easier because no locks will need to be acquired.
    //   2. All ioevents use Datamatch::AnyLength. We don't bother checking the datamatch, which
    //      will make this faster.
    //   3. We only ever register one eventfd to each address. This simplifies our data structure.
    ioevents: FnvHashMap<IoEventAddress, Event>,
}

impl HaxmVm {
    /// Constructs a new `HaxmVm` using the given `Haxm` instance.
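    ///
    /// # Example
    ///
    /// A minimal sketch of VM creation, mirroring the `create_vm` test below; it assumes a
    /// HAXM-capable host:
    ///
    /// ```ignore
    /// let haxm = Haxm::new().expect("failed to instantiate HAXM");
    /// let mem =
    ///     GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
    /// let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
    /// ```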
    pub fn new(haxm: &Haxm, guest_mem: GuestMemory) -> Result<HaxmVm> {
        let mut vm_id: u32 = 0;
        // SAFETY:
        // Safe because we know descriptor is a real haxm descriptor as this module is the only
        // one that can make Haxm objects.
        let ret = unsafe { ioctl_with_mut_ref(haxm, HAX_IOCTL_CREATE_VM, &mut vm_id) };
        if ret != 0 {
            return errno_result();
        }

        // Haxm creates additional device paths when VMs are created
        let vm_descriptor = open_haxm_vm_device(USE_GHAXM.load(Ordering::Relaxed), vm_id)?;

        for region in guest_mem.regions() {
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    false,
                    region.guest_addr.offset(),
                    region.size as u64,
                    MemoryRegionOp::Add(region.host_addr as *mut u8 as u64),
                )
            }?;
        }

        Ok(HaxmVm {
            vm_id,
            haxm: haxm.try_clone()?,
            descriptor: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            ioevents: FnvHashMap::default(),
        })
    }

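    /// Returns true if any of the bits in `cap` (a `HAX_CAP_*` value, e.g. `HAX_CAP_VM_LOG`)
    /// are reported in the `winfo` field of the HAXM driver's capability info.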
    pub fn check_raw_capability(&self, cap: u32) -> bool {
        let mut capability_info = hax_capabilityinfo::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            unsafe { ioctl_with_mut_ref(&self.haxm, HAX_IOCTL_CAPABILITY, &mut capability_info) };

        if ret != 0 {
            return false;
        }

        // If wstatus is zero, HAXM is not usable.
        // In this case, the winfo bits indicate why, rather than communicating capability
        // information.
        if capability_info.wstatus == 0 {
            return false;
        }

        (cap & capability_info.winfo as u32) != 0
    }

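    /// Registers a file at `path` to receive HAXM log output for this VM. Only supported by the
    /// internal fork of HAXM on Windows; on other configurations this is a no-op.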
    pub fn register_log_file(&self, path: &str) -> Result<()> {
        // The IOCTL here is only available on the internal fork of HAXM and only works on
        // Windows.
        #[cfg(windows)]
        if get_use_ghaxm() {
            let mut log_file = hax_log_file::default();

            // Although it would be more efficient to do this check prior to allocating the
            // log_file struct, the code would be more complex and less maintainable. This is only
            // ever called once per VM, so the extra temporary memory and time shouldn't be a
            // problem.
            if path.len() >= log_file.path.len() {
                return Err(Error::new(E2BIG));
            }

            let wstring = &win32_wide_string(path);
            log_file.path[..wstring.len()].clone_from_slice(wstring);

            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            let ret = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_REGISTER_LOG_FILE, &log_file) };

            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}

impl AsRawDescriptor for HaxmVm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}

enum MemoryRegionOp {
    // Map a memory region for the given host address.
    Add(u64),
    // Remove the memory region.
    Remove,
}

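/// # Safety
/// For `MemoryRegionOp::Add`, the caller must ensure the host virtual address refers to a valid
/// mapping of at least `size` bytes that stays mapped for as long as the region is registered,
/// and that the guest range does not overlap an already-registered region (these are the
/// invariants the callers' SAFETY comments rely on).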
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    read_only: bool,
    guest_addr: u64,
    size: u64,
    op: MemoryRegionOp,
) -> Result<()> {
    let (va, flags) = match op {
        MemoryRegionOp::Add(va) => {
            let mut flags = HAX_RAM_INFO_STANDALONE;
            if read_only {
                flags |= HAX_RAM_INFO_ROM
            }
            (va, flags)
        }
        MemoryRegionOp::Remove => (0, HAX_RAM_INFO_INVALID),
    };
    let ram_info = hax_set_ram_info2 {
        pa_start: guest_addr,
        size,
        va,
        flags,
        ..Default::default()
    };

    // SAFETY:
    // Safe because we know that our file is a VM fd and we verify the return result.
    let ret = ioctl_with_ref(descriptor, HAX_VM_IOCTL_SET_RAM2, &ram_info);
    if ret != 0 {
        return errno_result();
    }
    Ok(())
}

impl Vm for HaxmVm {
    fn try_clone(&self) -> Result<Self> {
        let mut ioevents = FnvHashMap::default();
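        // Per assumption 1 in the struct comment, all ioevents are registered before the VM is
        // cloned for the vcpu threads, so cloning the map here avoids any locking.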
        for (addr, evt) in self.ioevents.iter() {
            ioevents.insert(*addr, evt.try_clone()?);
        }
        Ok(HaxmVm {
            vm_id: self.vm_id,
            haxm: self.haxm.try_clone()?,
            descriptor: self.descriptor.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            ioevents,
        })
    }

    fn try_clone_descriptor(&self) -> Result<SafeDescriptor> {
        Err(Error::new(ENOTSUP))
    }

    fn hypervisor_kind(&self) -> HypervisorKind {
        HypervisorKind::Haxm
    }

    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            VmCap::PvClock => false,
            VmCap::Protected => false,
            VmCap::EarlyInitCpuid => false,
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let size = mem.size() as u64;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
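        // Reuse the lowest previously freed slot if one exists; otherwise allocate the next slot
        // after all current regions, reserving the first num_regions() slots for the guest memory
        // regions registered at VM creation.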
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY:
        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let res = unsafe {
            set_user_memory_region(
                &self.descriptor,
                read_only,
                guest_addr.offset(),
                size,
                MemoryRegionOp::Add(mem.as_ptr() as u64),
            )
        };

        if let Err(e) = res {
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (guest_addr, mem));
        Ok(slot)
    }

    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, mem) = regions.get_mut(&slot).ok_or(Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        let mut regions = self.mem_regions.lock();

        if let Some((guest_addr, mem)) = regions.get(&slot) {
            // SAFETY:
            // Safe because the slot is checked against the list of memory slots.
            unsafe {
                set_user_memory_region(
                    &self.descriptor,
                    false,
                    guest_addr.offset(),
                    mem.size() as u64,
                    MemoryRegionOp::Remove,
                )?;
            }
            self.mem_slot_gaps.lock().push(Reverse(slot));
            Ok(regions.remove(&slot).unwrap().1)
        } else {
            Err(Error::new(ENOENT))
        }
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        // Haxm does not support in-kernel devices
        Err(Error::new(libc::ENXIO))
    }

    fn get_dirty_log(&self, _slot: u32, _dirty_log: &mut [u8]) -> Result<()> {
        // Haxm does not support VmCap::DirtyLog
        Err(Error::new(libc::ENXIO))
    }

    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM currently only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        if self.ioevents.contains_key(&addr) {
            error!("HAXM does not support multiple ioevents for the same address");
            return Err(Error::new(EEXIST));
        }

        self.ioevents.insert(addr, evt.try_clone()?);

        Ok(())
    }

    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        match self.ioevents.get(&addr) {
            Some(existing_evt) => {
                // evt should match the existing evt associated with addr
                if evt != existing_evt {
                    return Err(Error::new(ENOENT));
                }
                self.ioevents.remove(&addr);
            }

            None => {
                return Err(Error::new(ENOENT));
            }
        };
        Ok(())
    }

    /// Trigger any io events based on the memory mapped IO at `addr`. If the hypervisor does
    /// in-kernel IO event delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        if let Some(evt) = self.ioevents.get(&addr) {
            evt.signal()?;
        }
        Ok(())
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        // Haxm does not support VmCap::PvClock
        Err(Error::new(libc::ENXIO))
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        // Haxm does not support VmCap::PvClock
        Err(Error::new(libc::ENXIO))
    }

    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: crate::BalloonEvent) -> Result<()> {
        // TODO(b/233773610): implement ballooning support in haxm
        warn!("Memory ballooning attempted but not supported on haxm hypervisor");
        // no-op
        Ok(())
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host.
        host_phys_addr_bits()
    }
}

impl VmX86_64 for HaxmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.haxm
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_VCPU_CREATE, &(id as u32)) };
        if fd < 0 {
            return errno_result();
        }

        let descriptor =
            open_haxm_vcpu_device(USE_GHAXM.load(Ordering::Relaxed), self.vm_id, id as u32)?;

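        // The tunnel is a region shared with the HAXM driver that carries per-vcpu exit
        // information, and io_va points at the buffer used for I/O data on port and MMIO exits.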
        let mut tunnel_info = hax_tunnel_info::default();

        // SAFETY:
        // Safe because we created tunnel_info and we check the return code for errors
        let ret = unsafe {
            ioctl_with_mut_ref(&descriptor, HAX_VCPU_IOCTL_SETUP_TUNNEL, &mut tunnel_info)
        };

        if ret != 0 {
            return errno_result();
        }

        Ok(Box::new(HaxmVcpu {
            descriptor,
            id,
            tunnel: tunnel_info.va as *mut hax_tunnel,
            io_buffer: tunnel_info.io_va as *mut c_void,
        }))
    }

    /// Sets the address of the three-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support with HAXM.
    fn set_tss_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    /// Sets the address of a one-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support with HAXM.
    fn set_identity_map_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    fn load_protected_vm_firmware(
        &mut self,
        _fw_addr: GuestAddress,
        _fw_max_size: u64,
    ) -> Result<()> {
        // Haxm does not support protected VMs
        Err(Error::new(libc::ENXIO))
    }
}

// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use std::time::Duration;

    use base::EventWaitResult;
    use base::MemoryMappingBuilder;
    use base::SharedMemory;

    use super::*;

    #[test]
    fn create_vm() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        HaxmVm::new(&haxm, mem).expect("failed to create vm");
    }

    #[test]
    fn create_vcpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        vm.create_vcpu(0).expect("failed to create vcpu");
    }

    #[test]
    fn register_ioevent() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let otherevt = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::AnyLength,
        )
        .expect_err("HAXM should not allow you to register two events for the same address");

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U8(None),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U32(Some(0xf6)),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.unregister_ioevent(&otherevt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown event should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf5), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown MMIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
    }

    #[test]
    fn handle_io_events() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let evt2 = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt2, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        // Check a pio address
        vm.handle_io_events(IoEventAddress::Pio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_ne!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        // Check an mmio address
        vm.handle_io_events(IoEventAddress::Mmio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_ne!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );

        // Check an address that does not match any registered ioevents
        vm.handle_io_events(IoEventAddress::Pio(0x1001), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
    }

    #[test]
    fn remove_memory() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).unwrap();
        let mem_size = 0x1000;
        let shm = SharedMemory::new("test", mem_size as u64).unwrap();
        let mem = MemoryMappingBuilder::new(mem_size)
            .from_shared_memory(&shm)
            .build()
            .unwrap();
        let mem_ptr = mem.as_ptr();
        let slot = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(mem),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        let removed_mem = vm.remove_memory_region(slot).unwrap();
        assert_eq!(removed_mem.size(), mem_size);
        assert_eq!(removed_mem.as_ptr(), mem_ptr);
    }

    #[cfg(windows)]
    #[test]
    fn register_log_file() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let vm = HaxmVm::new(&haxm, gm).unwrap();

        if !vm.check_raw_capability(HAX_CAP_VM_LOG) {
            return;
        }

        let dir = tempfile::TempDir::new().unwrap();
        let mut file_path = dir.path().to_owned();
        file_path.push("test");

        vm.register_log_file(file_path.to_str().unwrap())
            .expect("failed to register log file");

        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        // Setting cpuid will force some logs
        let cpuid = haxm.get_supported_cpuid().unwrap();
        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");

        assert!(file_path.exists());
    }
}