// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ffi::c_void;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::sync::Arc;

use base::errno_result;
use base::error;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ref;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::Event;
use base::MappedRegion;
use base::MmapError;
use base::Protection;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use fnv::FnvHashMap;
use libc::E2BIG;
use libc::EEXIST;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::ENOTSUP;
use libc::EOVERFLOW;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
#[cfg(windows)]
use win_util::win32_wide_string;

use super::*;
use crate::host_phys_addr_bits;
use crate::ClockState;
use crate::Datamatch;
use crate::DeviceKind;
use crate::Hypervisor;
use crate::IoEventAddress;
use crate::MemCacheType;
use crate::MemSlot;
use crate::VcpuX86_64;
use crate::Vm;
use crate::VmCap;
use crate::VmX86_64;

/// A wrapper around creating and using a HAXM VM.
pub struct HaxmVm {
    haxm: Haxm,
    vm_id: u32,
    descriptor: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (GuestAddress, Box<dyn MappedRegion>)>>>,
    /// A min-heap of MemSlot numbers that were used and then removed and can now be re-used.
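    // Note: `BinaryHeap` is a max-heap by default, so freed slots are wrapped in `Reverse` to
    // turn it into a min-heap, making `pop()` return the smallest available slot first.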
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    // HAXM's implementation of ioevents makes several assumptions about how crosvm uses ioevents:
    // 1. All ioevents are registered during device setup, and thus can be cloned when the vm is
    //    cloned instead of locked in an Arc<Mutex<>>. This will make handling ioevents in each
    //    vcpu thread easier because no locks will need to be acquired.
    // 2. All ioevents use Datamatch::AnyLength. We don't bother checking the datamatch, which
    //    will make this faster.
    // 3. We only ever register one eventfd to each address. This simplifies our data structure.
    ioevents: FnvHashMap<IoEventAddress, Event>,
}

impl HaxmVm {
    /// Constructs a new `HaxmVm` using the given `Haxm` instance.
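    ///
    /// A minimal usage sketch (mirroring the `create_vm` test below; this needs a working HAXM
    /// driver, so it is not run as a doc-test):
    ///
    /// ```ignore
    /// let haxm = Haxm::new().expect("failed to instantiate HAXM");
    /// let mem = GuestMemory::new(&[(GuestAddress(0), 0x1000)])
    ///     .expect("failed to create guest memory");
    /// let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
    /// ```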
    pub fn new(haxm: &Haxm, guest_mem: GuestMemory) -> Result<HaxmVm> {
        let mut vm_id: u32 = 0;
        // SAFETY:
        // Safe because we know descriptor is a real haxm descriptor as this module is the only
        // one that can make Haxm objects.
        let ret = unsafe { ioctl_with_mut_ref(haxm, HAX_IOCTL_CREATE_VM(), &mut vm_id) };
        if ret != 0 {
            return errno_result();
        }

        // Haxm creates additional device paths when VMs are created.
        let vm_descriptor = open_haxm_vm_device(USE_GHAXM.load(Ordering::Relaxed), vm_id)?;

        for region in guest_mem.regions() {
            // SAFETY:
            // Safe because the guest regions are guaranteed not to overlap.
            unsafe {
                set_user_memory_region(
                    &vm_descriptor,
                    false,
                    region.guest_addr.offset(),
                    region.size as u64,
                    MemoryRegionOp::Add(region.host_addr as *mut u8 as u64),
                )
            }?;
        }

        Ok(HaxmVm {
            vm_id,
            haxm: haxm.try_clone()?,
            descriptor: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            ioevents: FnvHashMap::default(),
        })
    }

    pub fn check_raw_capability(&self, cap: u32) -> bool {
        let mut capability_info = hax_capabilityinfo::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            unsafe { ioctl_with_mut_ref(&self.haxm, HAX_IOCTL_CAPABILITY(), &mut capability_info) };

        if ret != 0 {
            return false;
        }

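        // `winfo` carries the capability flag bits reported by the driver; the requested
        // capability is present if its bit is set.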
        (cap & capability_info.winfo as u32) != 0
    }

    pub fn register_log_file(&self, path: &str) -> Result<()> {
        // The IOCTL here is only available on an internal fork of HAXM and only works on
        // Windows.
        #[cfg(windows)]
        if get_use_ghaxm() {
            let mut log_file = hax_log_file::default();

            // Although it would be more efficient to do this check prior to allocating the
            // log_file struct, the code would be more complex and less maintainable. This is
            // only ever called once per VM, so the extra temporary memory and time shouldn't
            // be a problem.
            if path.len() >= log_file.path.len() {
                return Err(Error::new(E2BIG));
            }

            let wstring = &win32_wide_string(path);
            log_file.path[..wstring.len()].clone_from_slice(wstring);

            // SAFETY:
            // Safe because we know that our file is a VM fd and we verify the return result.
            let ret = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_REGISTER_LOG_FILE(), &log_file) };

            if ret != 0 {
                return errno_result();
            }
        }
        Ok(())
    }
}

impl AsRawDescriptor for HaxmVm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}

enum MemoryRegionOp {
    // Map a memory region for the given host address.
    Add(u64),
    // Remove the memory region.
    Remove,
}

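/// # Safety
///
/// For `MemoryRegionOp::Add`, the caller must ensure that the host virtual address refers to
/// at least `size` bytes of memory that remain valid and mapped for as long as the region is
/// registered, and that the guest range does not overlap another registered region.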
unsafe fn set_user_memory_region(
    descriptor: &SafeDescriptor,
    read_only: bool,
    guest_addr: u64,
    size: u64,
    op: MemoryRegionOp,
) -> Result<()> {
    let (va, flags) = match op {
        MemoryRegionOp::Add(va) => {
            let mut flags = HAX_RAM_INFO_STANDALONE;
            if read_only {
                flags |= HAX_RAM_INFO_ROM
            }
            (va, flags)
        }
        MemoryRegionOp::Remove => (0, HAX_RAM_INFO_INVALID),
    };
    let ram_info = hax_set_ram_info2 {
        pa_start: guest_addr,
        size,
        va,
        flags,
        ..Default::default()
    };

    // SAFETY:
    // Safe because we know that our file is a VM fd and we verify the return result.
    let ret = ioctl_with_ref(descriptor, HAX_VM_IOCTL_SET_RAM2(), &ram_info);
    if ret != 0 {
        return errno_result();
    }
    Ok(())
}

impl Vm for HaxmVm {
    fn try_clone(&self) -> Result<Self> {
        let mut ioevents = FnvHashMap::default();
        for (addr, evt) in self.ioevents.iter() {
            ioevents.insert(*addr, evt.try_clone()?);
        }
        Ok(HaxmVm {
            vm_id: self.vm_id,
            haxm: self.haxm.try_clone()?,
            descriptor: self.descriptor.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            ioevents,
        })
    }

    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            VmCap::PvClock => false,
            VmCap::Protected => false,
            VmCap::EarlyInitCpuid => false,
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let size = mem.size() as u64;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;
        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }
        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
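        // Reuse the lowest previously-freed slot if one is available; otherwise allocate the
        // next slot number after those used by current regions and the initial guest memory.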
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY:
        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let res = unsafe {
            set_user_memory_region(
                &self.descriptor,
                read_only,
                guest_addr.offset(),
                size,
                MemoryRegionOp::Add(mem.as_ptr() as u64),
            )
        };

        if let Err(e) = res {
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (guest_addr, mem));
        Ok(slot)
    }

    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, mem) = regions.get_mut(&slot).ok_or(Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        let mut regions = self.mem_regions.lock();

        if let Some((guest_addr, mem)) = regions.get(&slot) {
            // SAFETY:
            // Safe because the slot is checked against the list of memory slots.
            unsafe {
                set_user_memory_region(
                    &self.descriptor,
                    false,
                    guest_addr.offset(),
                    mem.size() as u64,
                    MemoryRegionOp::Remove,
                )?;
            }
            self.mem_slot_gaps.lock().push(Reverse(slot));
            Ok(regions.remove(&slot).unwrap().1)
        } else {
            Err(Error::new(ENOENT))
        }
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        // Haxm does not support in-kernel devices.
        Err(Error::new(libc::ENXIO))
    }

    fn get_dirty_log(&self, _slot: u32, _dirty_log: &mut [u8]) -> Result<()> {
        // Haxm does not support VmCap::DirtyLog.
        Err(Error::new(libc::ENXIO))
    }

    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM currently only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        if self.ioevents.contains_key(&addr) {
            error!("HAXM does not support multiple ioevents for the same address");
            return Err(Error::new(EEXIST));
        }

        self.ioevents.insert(addr, evt.try_clone()?);

        Ok(())
    }

    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        if datamatch != Datamatch::AnyLength {
            error!("HAXM only supports Datamatch::AnyLength");
            return Err(Error::new(ENOTSUP));
        }

        match self.ioevents.get(&addr) {
            Some(existing_evt) => {
                // `evt` should match the existing event associated with `addr`.
                if evt != existing_evt {
                    return Err(Error::new(ENOENT));
                }
                self.ioevents.remove(&addr);
            }

            None => {
                return Err(Error::new(ENOENT));
            }
        };
        Ok(())
    }

    /// Trigger any io events based on the memory mapped IO at `addr`. If the hypervisor does
    /// in-kernel IO event delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        if let Some(evt) = self.ioevents.get(&addr) {
            evt.signal()?;
        }
        Ok(())
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        // Haxm does not support VmCap::PvClock.
        Err(Error::new(libc::ENXIO))
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        // Haxm does not support VmCap::PvClock.
        Err(Error::new(libc::ENXIO))
    }

    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (_, region) = regions.get_mut(&slot).ok_or(Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: crate::BalloonEvent) -> Result<()> {
        // TODO(b/233773610): implement ballooning support in haxm
        warn!("Memory ballooning attempted but not supported on haxm hypervisor");
        // no-op
        Ok(())
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host.
        host_phys_addr_bits()
    }
}

impl VmX86_64 for HaxmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.haxm
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, HAX_VM_IOCTL_VCPU_CREATE(), &(id as u32)) };
        if fd < 0 {
            return errno_result();
        }

        let descriptor =
            open_haxm_vcpu_device(USE_GHAXM.load(Ordering::Relaxed), self.vm_id, id as u32)?;

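        // The "tunnel" is a structure shared with the HAXM driver: the driver writes VM exit
        // information into it, and `io_va` points at the buffer used to transfer data for port
        // I/O exits.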
        let mut tunnel_info = hax_tunnel_info::default();

        // SAFETY:
        // Safe because we created tunnel_info and we check the return code for errors.
        let ret = unsafe {
            ioctl_with_mut_ref(&descriptor, HAX_VCPU_IOCTL_SETUP_TUNNEL(), &mut tunnel_info)
        };

        if ret != 0 {
            return errno_result();
        }

        Ok(Box::new(HaxmVcpu {
            descriptor,
            id,
            tunnel: tunnel_info.va as *mut hax_tunnel,
            io_buffer: tunnel_info.io_va as *mut c_void,
        }))
    }

    /// Sets the address of the three-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support for HAXM.
    fn set_tss_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }

    /// Sets the address of a one-page region in the VM's address space.
    /// This function is only necessary for 16-bit guests, which we do not support for HAXM.
    fn set_identity_map_addr(&self, _addr: GuestAddress) -> Result<()> {
        Ok(())
    }
}

// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use std::time::Duration;

    use base::EventWaitResult;
    use base::MemoryMappingBuilder;
    use base::SharedMemory;

    use super::*;

    #[test]
    fn create_vm() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        HaxmVm::new(&haxm, mem).expect("failed to create vm");
    }

    #[test]
    fn create_vcpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        vm.create_vcpu(0).expect("failed to create vcpu");
    }

    #[test]
    fn register_ioevent() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let otherevt = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::AnyLength,
        )
        .expect_err("HAXM should not allow you to register two events for the same address");

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U8(None),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.register_ioevent(
            &otherevt,
            IoEventAddress::Mmio(0x1000),
            Datamatch::U32(Some(0xf6)),
        )
        .expect_err(
            "HAXM should not allow you to register ioevents with Datamatches other than AnyLength",
        );

        vm.unregister_ioevent(&otherevt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown event should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf5), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .expect_err("unregistering an unknown PIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0xf4), Datamatch::AnyLength)
            .expect_err("unregistering an unknown MMIO address should fail");
        vm.unregister_ioevent(&evt, IoEventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(&evt, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
    }

    #[test]
    fn handle_io_events() {
        let haxm = Haxm::new().expect("failed to create haxm");
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).expect("failed to create vm");
        let evt = Event::new().expect("failed to create event");
        let evt2 = Event::new().expect("failed to create event");
        vm.register_ioevent(&evt, IoEventAddress::Pio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evt2, IoEventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();

        // Check a PIO address.
        vm.handle_io_events(IoEventAddress::Pio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_ne!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        // Check an MMIO address.
        vm.handle_io_events(IoEventAddress::Mmio(0x1000), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_ne!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );

        // Check an address that does not match any registered ioevents.
        vm.handle_io_events(IoEventAddress::Pio(0x1001), &[])
            .expect("failed to handle_io_events");
        assert_eq!(
            evt.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
        assert_eq!(
            evt2.wait_timeout(Duration::from_millis(10))
                .expect("failed to read event"),
            EventWaitResult::TimedOut
        );
    }

    #[test]
    fn remove_memory() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = HaxmVm::new(&haxm, gm).unwrap();
        let mem_size = 0x1000;
        let shm = SharedMemory::new("test", mem_size as u64).unwrap();
        let mem = MemoryMappingBuilder::new(mem_size)
            .from_shared_memory(&shm)
            .build()
            .unwrap();
        let mem_ptr = mem.as_ptr();
        let slot = vm
            .add_memory_region(
                GuestAddress(0x1000),
                Box::new(mem),
                false,
                false,
                MemCacheType::CacheCoherent,
            )
            .unwrap();
        let removed_mem = vm.remove_memory_region(slot).unwrap();
        assert_eq!(removed_mem.size(), mem_size);
        assert_eq!(removed_mem.as_ptr(), mem_ptr);
    }

    #[cfg(windows)]
    #[test]
    fn register_log_file() {
        let haxm = Haxm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let vm = HaxmVm::new(&haxm, gm).unwrap();

        if !vm.check_raw_capability(HAX_CAP_VM_LOG) {
            return;
        }

        let dir = tempfile::TempDir::new().unwrap();
        let mut file_path = dir.path().to_owned();
        file_path.push("test");

        vm.register_log_file(file_path.to_str().unwrap())
            .expect("failed to register log file");

        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        // Setting cpuid will force some logs.
        let cpuid = haxm.get_supported_cpuid().unwrap();
        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");

        assert!(file_path.exists());
    }
}