• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! A crate for abstracting the underlying kernel hypervisor used in crosvm.
6 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
7 pub mod aarch64;
8 pub mod caps;
9 
10 #[cfg(all(
11     unix,
12     any(target_arch = "arm", target_arch = "aarch64"),
13     feature = "gunyah"
14 ))]
15 pub mod gunyah;
16 #[cfg(all(windows, feature = "haxm"))]
17 pub mod haxm;
18 #[cfg(any(target_os = "android", target_os = "linux"))]
19 pub mod kvm;
20 #[cfg(target_arch = "riscv64")]
21 pub mod riscv64;
22 #[cfg(all(windows, feature = "whpx"))]
23 pub mod whpx;
24 #[cfg(target_arch = "x86_64")]
25 pub mod x86_64;
26 
27 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
28 #[cfg(all(unix, feature = "geniezone"))]
29 pub mod geniezone;
30 
31 use base::AsRawDescriptor;
32 use base::Event;
33 use base::MappedRegion;
34 use base::Protection;
35 use base::Result;
36 use base::SafeDescriptor;
37 use serde::Deserialize;
38 use serde::Serialize;
39 use vm_memory::GuestAddress;
40 use vm_memory::GuestMemory;
41 
42 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
43 pub use crate::aarch64::*;
44 pub use crate::caps::*;
45 #[cfg(target_arch = "riscv64")]
46 pub use crate::riscv64::*;
47 #[cfg(target_arch = "x86_64")]
48 pub use crate::x86_64::*;
49 
/// An index in the list of guest-mapped memory regions.
///
/// Slots are handed out by `Vm::add_memory_region` and are later used to address the region in
/// `Vm::msync_memory_region`, the madvise helpers, and `Vm::remove_memory_region`.
pub type MemSlot = u32;
52 
53 /// Range of GPA space. Starting from `guest_address` up to `size`.
54 pub struct MemRegion {
55     pub guest_address: GuestAddress,
56     pub size: u64,
57 }
58 
/// Signal to the hypervisor on kernels that support the KVM_CAP_USER_CONFIGURE_NONCOHERENT_DMA (or
/// equivalent) that during user memory region (memslot) configuration, a guest page's memtype
/// should be considered in SLAT effective memtype determination rather than implicitly respecting
/// only the host page's memtype.
///
/// This explicit control is needed for Virtio devices (e.g. gpu) that configure memslots for host
/// WB page mappings with guest WC page mappings. See b/316337317, b/360295883 for more detail.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum MemCacheType {
    /// Don't provide any explicit instruction to the hypervisor on how it should determine a
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means that the memslot is flagged with VMX_EPT_IPAT_BIT such that
    /// only the host memtype is respected.
    CacheCoherent,
    /// Explicitly instruct the hypervisor to respect the guest page's memtype when determining the
    /// memslot's effective memtype.
    ///
    /// On KVM-VMX (Intel), this means the memslot is NOT flagged with VMX_EPT_IPAT_BIT, and the
    /// effective memtype will generally decay to the weaker amongst the host/guest memtypes and
    /// the MTRR for the physical address.
    CacheNonCoherent,
}
82 
/// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
/// requests they be freed. Use without the guest's knowledge is sure to break something.
///
/// Consumed by `Vm::handle_balloon_event`.
pub enum BalloonEvent {
    /// Balloon event when the region is acquired from the guest. The guest cannot access this
    /// region any more. The guest memory can be reclaimed by the host OS. As per virtio-balloon
    /// spec, the given address and size are intended to be page-aligned.
    Inflate(MemRegion),
    /// Balloon event when the region is returned to the guest. VMM should reallocate memory and
    /// register it with the hypervisor for accesses by the guest.
    Deflate(MemRegion),
    /// Balloon event when the requested memory size is achieved. This can be achieved through
    /// either inflation or deflation. The `u64` will be the current size of the balloon in bytes.
    BalloonTargetReached(u64),
}
97 
/// Supported hypervisors.
///
/// When adding a new one, also update the HypervisorFfi in crosvm_control/src/lib.rs
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum HypervisorKind {
    /// GenieZone; built on unix/ARM targets with the `geniezone` feature (see `geniezone` module).
    Geniezone,
    /// Gunyah; built on unix/ARM targets with the `gunyah` feature (see `gunyah` module).
    Gunyah,
    /// Linux KVM; built on Android/Linux targets (see `kvm` module).
    Kvm,
    /// HAXM; built on Windows targets with the `haxm` feature (see `haxm` module).
    Haxm,
    /// WHPX; built on Windows targets with the `whpx` feature (see `whpx` module).
    Whpx,
}
109 
/// A trait for checking hypervisor capabilities.
///
/// Note: per-VM capabilities are queried through `Vm::check_capability` instead, since some
/// extensions depend on the particular `Vm` instance.
pub trait Hypervisor: Send {
    /// Makes a shallow clone of this `Hypervisor`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `HypervisorCap` is available.
    fn check_capability(&self, cap: HypervisorCap) -> bool;
}
120 
/// A wrapper for using a VM and getting/setting its state.
pub trait Vm: Send {
    /// Makes a shallow clone of this `Vm`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Makes a shallow clone of the fd of this `Vm`.
    fn try_clone_descriptor(&self) -> Result<SafeDescriptor>;

    /// Returns hypervisor managing this `Vm`.
    fn hypervisor_kind(&self) -> HypervisorKind;

    /// Checks if a particular `VmCap` is available.
    ///
    /// This is distinct from the `Hypervisor` version of this method because some extensions depend
    /// on the particular `Vm` instance. This method is encouraged because it more accurately
    /// reflects the usable capabilities.
    fn check_capability(&self, c: VmCap) -> bool;

    /// Enable the VM capabilities.
    ///
    /// The default implementation reports `Unsupported`; backends that can enable capabilities
    /// override it.
    fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
        Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
    }

    /// Get the guest physical address size in bits.
    fn get_guest_phys_addr_bits(&self) -> u8;

    /// Gets the guest-mapped memory for the Vm.
    fn get_memory(&self) -> &GuestMemory;

    /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the memory mapping is returned on success.  The slot can be given
    /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
    /// ownership of `mem_region`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    ///
    /// `cache` can be used to set guest mem cache attribute if supported. Default is cache coherent
    /// memory. Noncoherent memory means this memory might not be coherent from all access points,
    /// e.g this could be the case when host GPU doesn't set the memory to be coherent with CPU
    /// access. Setting this attribute would allow hypervisor to adjust guest mem control to ensure
    /// synchronized guest access in noncoherent DMA case.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
        cache: MemCacheType,
    ) -> Result<MemSlot>;

    /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
    /// `offset` from the start of the region.  `offset` must be page aligned.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;

    /// Gives a MADV_PAGEOUT advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_pageout_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Gives a MADV_REMOVE advice to the memory region mapped at `slot`, with the address range
    /// starting at `offset` from the start of the region, and with size `size`. `offset`
    /// must be page aligned.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn madvise_remove_memory_region(
        &mut self,
        slot: MemSlot,
        offset: usize,
        size: usize,
    ) -> Result<()>;

    /// Removes and drops the memory region that was previously added at the given slot, returning
    /// ownership of the underlying `MappedRegion` to the caller.
    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;

    /// Creates an emulated device.
    fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`.  Only works on VMs that support `VmCap::DirtyLog`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
    /// `data` slice represents the contents and length of the write, which is used to compare with
    /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
    /// delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;

    /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn get_pvclock(&self) -> Result<ClockState>;

    /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn set_pvclock(&self, state: &ClockState) -> Result<()>;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `slot` - Slot of the memory region to modify, as returned by `add_memory_region`.
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()>;

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;

    /// Events from virtio-balloon that affect the state for guest memory and host memory.
    fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>;
}
287 
/// Operation for Io and Mmio
///
/// Passed to the `handle_fn` callbacks of `Vcpu::handle_mmio` / `Vcpu::handle_io` as part of
/// `IoParams`.
#[derive(Debug)]
pub enum IoOperation<'a> {
    /// Data to be read from a device on the bus.
    ///
    /// The `handle_fn` should fill the entire slice with the read data.
    Read(&'a mut [u8]),

    /// Data to be written to a device on the bus.
    Write(&'a [u8]),
}
299 
/// Parameters describing an MMIO or PIO from the guest.
#[derive(Debug)]
pub struct IoParams<'a> {
    // For MMIO this is a guest physical address; for PIO it is a port number — inferred from
    // `IoEventAddress`'s Pio/Mmio split; confirm against the per-backend handlers.
    pub address: u64,
    /// Whether this access is a read or a write, along with the data buffer.
    pub operation: IoOperation<'a>,
}
306 
/// Handle to a virtual CPU that may be used to request a VM exit from within a signal handler.
///
/// Obtained via `Vcpu::signal_handle`. The boxed inner trait object hides the backend-specific
/// implementation.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub struct VcpuSignalHandle {
    inner: Box<dyn VcpuSignalHandleInner>,
}

#[cfg(any(target_os = "android", target_os = "linux"))]
impl VcpuSignalHandle {
    /// Request an immediate exit for this VCPU.
    ///
    /// This function is safe to call from a signal handler.
    pub fn signal_immediate_exit(&self) {
        // Delegates to the backend's async-signal-safe implementation.
        self.inner.signal_immediate_exit()
    }
}
322 
/// Signal-safe mechanism for requesting an immediate VCPU exit.
///
/// Each hypervisor backend must implement this for its VCPU type.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub(crate) trait VcpuSignalHandleInner {
    /// Signal the associated VCPU to exit if it is currently running.
    ///
    /// # Safety
    ///
    /// The implementation of this function must be async signal safe.
    /// <https://man7.org/linux/man-pages/man7/signal-safety.7.html>
    fn signal_immediate_exit(&self);
}
336 
/// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
/// state, which may be used to execute virtual machines.
pub trait Vcpu: downcast_rs::DowncastSync {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Casts this architecture specific trait object to the base trait object `Vcpu`.
    fn as_vcpu(&self) -> &dyn Vcpu;

    /// Runs the VCPU until it exits, returning the reason for the exit.
    fn run(&mut self) -> Result<VcpuExit>;

    /// Returns the vcpu id.
    fn id(&self) -> usize;

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool);

    /// Returns a handle that can be used to cause this VCPU to exit from `run()` from a signal
    /// handler.
    #[cfg(any(target_os = "android", target_os = "linux"))]
    fn signal_handle(&self) -> VcpuSignalHandle;

    /// Handles an incoming MMIO request from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether a MMIO read or MMIO write was the reason for the MMIO
    /// exit, call `handle_fn` with the respective IoParams to perform the MMIO read or write, and
    /// set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Result<()>) -> Result<()>;

    /// Handles an incoming PIO from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether an input or output was the reason for the Io exit,
    /// call `handle_fn` with the respective IoParams to perform the input/output operation, and set
    /// the return data in the vcpu so that the vcpu can resume running.
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams)) -> Result<()>;

    /// Signals to the hypervisor that this Vcpu is being paused by userspace.
    fn on_suspend(&self) -> Result<()>;

    /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
    /// hypervisor API (e.g., kvm.h).  `args` are the arguments for enabling the feature, if any.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
}

// Allows downcasting a `dyn Vcpu` back to its concrete backend type.
downcast_rs::impl_downcast!(sync Vcpu);
396 
/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
pub enum IoEventAddress {
    /// A port number in programmable I/O space.
    Pio(u64),
    /// A guest physical address in memory mapped I/O space.
    Mmio(u64),
}
403 
/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
///
/// The size variant must match the expected size of the guest's write; a `Some` payload
/// additionally restricts signaling to writes of that exact value.
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Datamatch {
    /// Match a write of any length and any value.
    AnyLength,
    /// Match 1-byte writes; `Some(v)` also requires the written value to equal `v`.
    U8(Option<u8>),
    /// Match 2-byte writes; `Some(v)` also requires the written value to equal `v`.
    U16(Option<u16>),
    /// Match 4-byte writes; `Some(v)` also requires the written value to equal `v`.
    U32(Option<u32>),
    /// Match 8-byte writes; `Some(v)` also requires the written value to equal `v`.
    U64(Option<u64>),
}
413 
/// Broad classification of why a Vcpu shut down.
#[derive(Copy, Clone, Debug)]
pub enum VcpuShutdownErrorKind {
    DoubleFault,
    TripleFault,
    Other,
}

/// A Vcpu shutdown may signify an error, such as a double or triple fault,
/// or hypervisor specific reasons. This error covers all such cases.
#[derive(Copy, Clone, Debug)]
pub struct VcpuShutdownError {
    kind: VcpuShutdownErrorKind,
    raw_error_code: u64,
}

impl VcpuShutdownError {
    /// Bundles a shutdown classification with the hypervisor's raw error code.
    pub fn new(kind: VcpuShutdownErrorKind, raw_error_code: u64) -> VcpuShutdownError {
        VcpuShutdownError {
            raw_error_code,
            kind,
        }
    }

    /// Returns the broad classification of this shutdown.
    pub fn kind(&self) -> VcpuShutdownErrorKind {
        self.kind
    }

    /// Returns the hypervisor-specific raw error code.
    pub fn get_raw_error_code(&self) -> u64 {
        self.raw_error_code
    }
}
443 
// Note that when adding entries to the VcpuExit enum you may want to add corresponding entries in
// crosvm::stats::exit_to_index and crosvm::stats::exit_index_to_str if you don't want the new
// exit type to be categorized as "Unknown".

/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
#[derive(Debug, Clone, Copy)]
pub enum VcpuExit {
    /// An io instruction needs to be emulated.
    /// vcpu handle_io should be called to handle the io operation
    Io,
    /// A mmio instruction needs to be emulated.
    /// vcpu handle_mmio should be called to handle the mmio operation
    Mmio,
    /// The guest signaled end-of-interrupt on the given IOAPIC vector.
    IoapicEoi {
        vector: u8,
    },
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    /// The guest shut down; `Err` carries the fault details when the shutdown was an error.
    Shutdown(std::result::Result<(), VcpuShutdownError>),
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    Intr,
    SetTpr,
    TprAccess,
    InternalError,
    SystemEventShutdown,
    SystemEventReset,
    SystemEventCrash,
    /// An invalid vcpu register was set while running.
    InvalidVpRegister,
    /// incorrect setup for vcpu requiring an unsupported feature
    UnsupportedFeature,
    /// vcpu run was user cancelled
    Canceled,
    /// an unrecoverable exception was encountered (different from Exception)
    UnrecoverableException,
    /// vcpu stopped due to an msr access.
    MsrAccess,
    /// vcpu stopped due to a cpuid request.
    #[cfg(target_arch = "x86_64")]
    Cpuid {
        entry: CpuIdEntry,
    },
    /// vcpu stopped due to calling rdtsc
    RdTsc,
    /// vcpu stopped for an apic smi trap
    ApicSmiTrap,
    /// vcpu stopped due to an apic trap
    ApicInitSipiTrap,
    /// vcpu stopped due to bus lock
    BusLock,
    /// Riscv supervisor call.
    Sbi {
        extension_id: u64,
        function_id: u64,
        args: [u64; 6],
    },
    /// Emulate CSR access from guest.
    RiscvCsr {
        csr_num: u64,
        new_value: u64,
        write_mask: u64,
        ret_value: u64,
    },
}
513 
/// A device type to create with `Vm.create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV3,
    /// RiscV AIA in-kernel emulation
    #[cfg(target_arch = "riscv64")]
    RiscvAia,
}
529 
/// The source chip of an `IrqSource`
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSourceChip {
    /// Primary programmable interrupt controller.
    PicPrimary,
    /// Secondary programmable interrupt controller.
    PicSecondary,
    /// I/O APIC.
    Ioapic,
    /// ARM generic interrupt controller.
    Gic,
    /// RISC-V advanced interrupt architecture.
    Aia,
}
540 
/// A source of IRQs in an `IrqRoute`.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum IrqSource {
    /// A pin on one of the emulated interrupt controller chips.
    Irqchip { chip: IrqSourceChip, pin: u32 },
    /// A message-signaled interrupt, identified by its address and data payload.
    Msi { address: u64, data: u32 },
}
548 
/// A single route for an IRQ.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub struct IrqRoute {
    /// Global system interrupt number this route applies to.
    pub gsi: u32,
    /// Where the interrupt originates (irqchip pin or MSI).
    pub source: IrqSource,
}
556 
/// The state of the paravirtual clock.
///
/// Retrieved and restored via `Vm::get_pvclock` / `Vm::set_pvclock` on VMs supporting
/// `VmCap::PvClock`.
#[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
pub struct ClockState {
    /// Current pv clock timestamp, as seen by the guest
    pub clock: u64,
}
563 
/// The MPState represents the state of a processor.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum MPState {
    /// the vcpu is currently running (x86/x86_64,arm/arm64)
    Runnable,
    /// the vcpu is an application processor (AP) which has not yet received an INIT signal
    /// (x86/x86_64)
    Uninitialized,
    /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
    InitReceived,
    /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
    Halted,
    /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
    SipiReceived,
    /// the vcpu is stopped (arm/arm64)
    Stopped,
}
582 
/// Whether the VM should be run in protected mode or not.
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ProtectionType {
    /// The VM should be run in the unprotected mode, where the host has access to its memory.
    Unprotected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via the protected VM firmware, so that it can access its secrets.
    Protected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via a custom VM firmware, useful for debugging and testing.
    ProtectedWithCustomFirmware,
    /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
    /// will still be unable to access the VM memory, but it won't be given any secrets.
    ProtectedWithoutFirmware,
    /// The VM should be run in unprotected mode, but with the same memory layout as protected
    /// mode, protected VM firmware loaded, and simulating protected mode as much as possible.
    /// This is useful for debugging the protected VM firmware and other protected mode issues.
    UnprotectedWithFirmware,
}
602 
603 impl ProtectionType {
604     /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
isolates_memory(&self) -> bool605     pub fn isolates_memory(&self) -> bool {
606         matches!(
607             self,
608             Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
609         )
610     }
611 
612     /// Returns whether the VMM needs to load the pVM firmware.
needs_firmware_loaded(&self) -> bool613     pub fn needs_firmware_loaded(&self) -> bool {
614         matches!(
615             self,
616             Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
617         )
618     }
619 
620     /// Returns whether the VM runs a pVM firmware.
runs_firmware(&self) -> bool621     pub fn runs_firmware(&self) -> bool {
622         self.needs_firmware_loaded() || matches!(self, Self::Protected)
623     }
624 }
625 
/// Hypervisor-level VM configuration.
#[derive(Clone, Copy)]
pub struct Config {
    #[cfg(target_arch = "aarch64")]
    /// enable the Memory Tagging Extension in the guest
    pub mte: bool,
    /// Protection mode the VM runs in (see [`ProtectionType`]).
    pub protection_type: ProtectionType,
}

impl Default for Config {
    /// Defaults to an unprotected VM with MTE disabled (aarch64 only).
    fn default() -> Config {
        Config {
            #[cfg(target_arch = "aarch64")]
            mte: false,
            protection_type: ProtectionType::Unprotected,
        }
    }
}
643