• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! A crate for abstracting the underlying kernel hypervisor used in crosvm.
6 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
7 pub mod aarch64;
8 pub mod caps;
9 
10 #[cfg(all(
11     unix,
12     any(target_arch = "arm", target_arch = "aarch64"),
13     feature = "gunyah"
14 ))]
15 pub mod gunyah;
16 #[cfg(all(windows, feature = "haxm"))]
17 pub mod haxm;
18 #[cfg(unix)]
19 pub mod kvm;
20 #[cfg(all(windows, feature = "whpx"))]
21 pub mod whpx;
22 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
23 pub mod x86_64;
24 
25 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
26 #[cfg(all(unix, feature = "geniezone"))]
27 pub mod geniezone;
28 
29 use std::os::raw::c_int;
30 
31 use base::AsRawDescriptor;
32 use base::Event;
33 use base::MappedRegion;
34 use base::Protection;
35 use base::Result;
36 use base::SafeDescriptor;
37 use serde::Deserialize;
38 use serde::Serialize;
39 use vm_memory::GuestAddress;
40 use vm_memory::GuestMemory;
41 
42 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
43 pub use crate::aarch64::*;
44 pub use crate::caps::*;
45 #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
46 pub use crate::x86_64::*;
47 
/// An index in the list of guest-mapped memory regions.
///
/// Returned by `Vm::add_memory_region` and used to identify that region in later calls such as
/// `Vm::msync_memory_region`, `Vm::get_dirty_log`, and `Vm::remove_memory_region`.
pub type MemSlot = u32;
50 
/// A trait for checking hypervisor capabilities.
pub trait Hypervisor: Send {
    /// Makes a shallow clone of this `Hypervisor`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `HypervisorCap` is available.
    ///
    /// Note that some capabilities depend on the particular `Vm` instance; prefer
    /// `Vm::check_capability` where a `Vm` is available.
    fn check_capability(&self, cap: HypervisorCap) -> bool;
}
61 
/// A wrapper for using a VM and getting/setting its state.
pub trait Vm: Send {
    /// Makes a shallow clone of this `Vm`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Checks if a particular `VmCap` is available.
    ///
    /// This is distinct from the `Hypervisor` version of this method because some extensions depend
    /// on the particular `Vm` instance. This method is encouraged because it more accurately
    /// reflects the usable capabilities.
    fn check_capability(&self, c: VmCap) -> bool;

    /// Enable the VM capabilities.
    ///
    /// The default implementation returns an `Unsupported` error, for hypervisors that do not
    /// support enabling capabilities at runtime.
    fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
        Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
    }

    /// Get the guest physical address size in bits.
    fn get_guest_phys_addr_bits(&self) -> u8;

    /// Gets the guest-mapped memory for the Vm.
    fn get_memory(&self) -> &GuestMemory;

    /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the memory mapping is returned on success.  The slot can be given
    /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
    /// ownership of `mem_region`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        log_dirty_pages: bool,
    ) -> Result<MemSlot>;

    /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
    /// `offset` from the start of the region.  `offset` must be page aligned.
    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;

    /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot.
    fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;

    /// Creates an emulated device, returning a descriptor that refers to it.
    fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`.  Only works on VMs that support `VmCap::DirtyLog`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    fn unregister_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()>;

    /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
    /// `data` slice represents the contents and length of the write, which is used to compare with
    /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
    /// delivery, this is a no-op.
    fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;

    /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn get_pvclock(&self) -> Result<ClockState>;

    /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
    /// Only works on VMs that support `VmCap::PvClock`.
    fn set_pvclock(&self, state: &ClockState) -> Result<()>;

    /// Maps `size` bytes starting at `fd_offset` bytes from within the given `fd`
    /// at `offset` bytes from the start of the arena with `prot` protections.
    /// `offset` must be page aligned.
    ///
    /// # Arguments
    /// * `slot` - The memory slot (arena) to map into.
    /// * `offset` - Page aligned offset into the arena in bytes.
    /// * `size` - Size of memory region in bytes.
    /// * `fd` - File descriptor to mmap from.
    /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
    /// * `prot` - Protection (e.g. readable/writable) of the memory region.
    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()>;

    /// Remove `size`-byte mapping starting at `offset`.
    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;

    /// Frees the given segment of guest memory to be reclaimed by the host OS.
    /// This is intended for use with virtio-balloon, where a guest driver determines
    /// unused ranges and requests they be freed. Use without the guest's knowledge is sure
    /// to break something. As per virtio-balloon spec, the given address and size
    /// are intended to be page-aligned.
    ///
    /// # Arguments
    /// * `guest_address` - Address in the guest's "physical" memory to begin the unmapping
    /// * `size` - The size of the region to unmap, in bytes
    fn handle_inflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()>;

    /// Reallocates memory and maps it to provide to the guest. This is intended to be used
    /// exclusively in tandem with `handle_inflate`, and will return an `Err` Result otherwise.
    ///
    /// # Arguments
    /// * `guest_address` - Address in the guest's "physical" memory to begin the mapping
    /// * `size` - The size of the region to map, in bytes
    fn handle_deflate(&mut self, guest_address: GuestAddress, size: u64) -> Result<()>;
}
209 
/// A unique fingerprint for a particular `VcpuRunHandle`, used in `Vcpu` impls to ensure the
/// `VcpuRunHandle` they receive is the same one that was returned from `take_run_handle`.
#[derive(Clone, PartialEq, Eq)]
pub struct VcpuRunHandleFingerprint(u64);

impl VcpuRunHandleFingerprint {
    /// Returns the raw 64-bit fingerprint value.
    pub fn as_u64(&self) -> u64 {
        self.0
    }
}
220 
/// A handle returned by a `Vcpu` to be used with `Vcpu::run` to execute a virtual machine's VCPU.
///
/// This is used to ensure that the caller has bound the `Vcpu` to a thread with
/// `Vcpu::take_run_handle` and to execute hypervisor specific cleanup routines when dropped.
pub struct VcpuRunHandle {
    // Hypervisor-specific cleanup routine, invoked by this type's `Drop` impl.
    drop_fn: fn(),
    // Identifies this handle; `Vcpu` impls compare it against the handle passed to `run`.
    fingerprint: VcpuRunHandleFingerprint,
    // Prevents Send+Sync for this type.
    phantom: std::marker::PhantomData<*mut ()>,
}
231 
232 impl VcpuRunHandle {
233     /// Used by `Vcpu` impls to create a unique run handle, that when dropped, will call the given
234     /// `drop_fn`.
new(drop_fn: fn()) -> Self235     pub fn new(drop_fn: fn()) -> Self {
236         // Creates a probably unique number with a hash of the current thread id and epoch time.
237         use std::hash::Hash;
238         use std::hash::Hasher;
239         let mut hasher = std::collections::hash_map::DefaultHasher::new();
240         std::time::Instant::now().hash(&mut hasher);
241         std::thread::current().id().hash(&mut hasher);
242         Self {
243             drop_fn,
244             fingerprint: VcpuRunHandleFingerprint(hasher.finish()),
245             phantom: std::marker::PhantomData,
246         }
247     }
248 
249     /// Gets the unique fingerprint which may be copied and compared freely.
fingerprint(&self) -> &VcpuRunHandleFingerprint250     pub fn fingerprint(&self) -> &VcpuRunHandleFingerprint {
251         &self.fingerprint
252     }
253 }
254 
impl Drop for VcpuRunHandle {
    fn drop(&mut self) {
        // Run the hypervisor-specific cleanup routine supplied to `VcpuRunHandle::new`.
        (self.drop_fn)();
    }
}
260 
/// Operation for Io and Mmio
#[derive(Copy, Clone, Debug)]
pub enum IoOperation {
    /// The guest is reading; the handler supplies the data (see `Vcpu::handle_mmio`/`handle_io`).
    Read,
    /// The guest is writing the contained data.
    Write {
        /// Data to be written.
        ///
        /// For 64 bit architecture, Mmio and Io only work with at most 8 bytes of data.
        data: [u8; 8],
    },
}

/// Parameters describing an MMIO or PIO from the guest.
#[derive(Copy, Clone, Debug)]
pub struct IoParams {
    /// Guest address being accessed (MMIO address or PIO port).
    pub address: u64,
    /// Number of bytes in the access.
    pub size: usize,
    /// Whether this is a read or a write, with the data for writes.
    pub operation: IoOperation,
}
280 
/// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
/// state, which may be used to execute virtual machines.
///
/// To run, `take_run_handle` must be called to lock the vcpu to a thread. Then the returned
/// `VcpuRunHandle` can be used for running.
pub trait Vcpu: downcast_rs::DowncastSync {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized;

    /// Casts this architecture specific trait object to the base trait object `Vcpu`.
    fn as_vcpu(&self) -> &dyn Vcpu;

    /// Returns a unique `VcpuRunHandle`. A `VcpuRunHandle` is required to run the guest.
    ///
    /// Assigns a vcpu to the current thread so that signal handlers can call
    /// set_local_immediate_exit().  An optional signal number will be temporarily blocked while
    /// assigning the vcpu to the thread and later blocked when `VcpuRunHandle` is destroyed.
    ///
    /// Returns an error, `EBUSY`, if the current thread already contains a Vcpu.
    fn take_run_handle(&self, signal_num: Option<c_int>) -> Result<VcpuRunHandle>;

    /// Runs the VCPU until it exits, returning the reason for the exit.
    ///
    /// Note that the state of the VCPU and associated VM must be setup first for this to do
    /// anything useful. The given `run_handle` must be the same as the one returned by
    /// `take_run_handle` for this `Vcpu`.
    fn run(&mut self, run_handle: &VcpuRunHandle) -> Result<VcpuExit>;

    /// Returns the vcpu id.
    fn id(&self) -> usize;

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool);

    /// Sets/clears the bit for immediate exit for the vcpu on the current thread.
    fn set_local_immediate_exit(exit: bool)
    where
        Self: Sized;

    /// Returns a function pointer that invokes `set_local_immediate_exit` in a
    /// signal-safe way when called.
    fn set_local_immediate_exit_fn(&self) -> extern "C" fn();

    /// Handles an incoming MMIO request from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether a MMIO read or MMIO write was the reason for the MMIO
    /// exit, call `handle_fn` with the respective IoParams to perform the MMIO read or write, and
    /// set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>;

    /// Handles an incoming PIO from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and in the same
    /// thread as run().
    ///
    /// Once called, it will determine whether an input or output was the reason for the Io exit,
    /// call `handle_fn` with the respective IoParams to perform the input/output operation, and set
    /// the return data in the vcpu so that the vcpu can resume running.
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>;

    /// Handles the HYPERV_HYPERCALL exit from a vcpu.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::HypervHcall`, and in the
    /// same thread as run.
    ///
    /// Once called, it will parse the appropriate input parameters to the provided function to
    /// handle the hyperv call, and then set the return data into the vcpu so it can resume running.
    fn handle_hyperv_hypercall(&self, func: &mut dyn FnMut(HypervHypercall) -> u64) -> Result<()>;

    /// Handles a RDMSR exit from the guest.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::RdMsr`,
    /// and in the same thread as run.
    ///
    /// It will put `data` into the guest buffer and return.
    fn handle_rdmsr(&self, data: u64) -> Result<()>;

    /// Handles a WRMSR exit from the guest by removing any error indication for the operation.
    ///
    /// This function should be called after `Vcpu::run` returns `VcpuExit::WrMsr`,
    /// and in the same thread as run.
    fn handle_wrmsr(&self);

    /// Signals to the hypervisor that this guest is being paused by userspace.  Only works on Vms
    /// that support `VmCap::PvClockSuspend`.
    fn pvclock_ctrl(&self) -> Result<()>;

    /// Specifies set of signals that are blocked during execution of `RunnableVcpu::run`.  Signals
    /// that are not blocked will cause run to return with `VcpuExit::Intr`.  Only works on Vms that
    /// support `VmCap::SignalMask`.
    fn set_signal_mask(&self, signals: &[c_int]) -> Result<()>;

    /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
    /// hypervisor API (e.g., kvm.h).  `args` are the arguments for enabling the feature, if any.
    ///
    /// # Safety
    /// This function is marked as unsafe because `args` may be interpreted as pointers for some
    /// capabilities. The caller must ensure that any pointers passed in the `args` array are
    /// allocated as the kernel expects, and that mutable pointers are owned.
    unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
}

// Allows `dyn Vcpu` trait objects to be downcast back to concrete hypervisor-specific vcpu types.
downcast_rs::impl_downcast!(sync Vcpu);
389 
/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
pub enum IoEventAddress {
    /// An address in programmable I/O (port) space.
    Pio(u64),
    /// An address in memory-mapped I/O space.
    Mmio(u64),
}

/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
///
/// The inner `Option` holds the value to match against the guest's write; `None` matches on size
/// alone.
#[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
pub enum Datamatch {
    /// Match a write of any length.
    AnyLength,
    /// Match a 1-byte write.
    U8(Option<u8>),
    /// Match a 2-byte write.
    U16(Option<u16>),
    /// Match a 4-byte write.
    U32(Option<u32>),
    /// Match an 8-byte write.
    U64(Option<u64>),
}
406 
/// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
#[derive(Debug, Clone, Copy)]
pub enum VcpuExit {
    /// An io instruction needs to be emulated.
    /// vcpu handle_io should be called to handle the io operation
    Io,
    /// A mmio instruction needs to be emulated.
    /// vcpu handle_mmio should be called to handle the mmio operation
    Mmio,
    /// An end-of-interrupt for the given vector was signaled via the IOAPIC.
    IoapicEoi {
        vector: u8,
    },
    /// A Hyper-V hypercall was made; see `Vcpu::handle_hyperv_hypercall`.
    HypervHypercall,
    Unknown,
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    Shutdown,
    FailEntry {
        hardware_entry_failure_reason: u64,
    },
    Intr,
    SetTpr,
    TprAccess,
    S390Sieic,
    S390Reset,
    Dcr,
    Nmi,
    InternalError,
    Osi,
    PaprHcall,
    S390Ucontrol,
    Watchdog,
    S390Tsch,
    Epr,
    SystemEventShutdown,
    SystemEventReset,
    SystemEventCrash,
    SystemEventS2Idle,
    /// A model-specific register read; see `Vcpu::handle_rdmsr`.
    RdMsr {
        index: u32,
    },
    /// A model-specific register write; see `Vcpu::handle_wrmsr`.
    WrMsr {
        index: u32,
        data: u64,
    },
    /// An invalid vcpu register was set while running.
    InvalidVpRegister,
    /// incorrect setup for vcpu requiring an unsupported feature
    UnsupportedFeature,
    /// vcpu run was user cancelled
    Canceled,
    /// an unrecoverable exception was encountered (different from Exception)
    UnrecoverableException,
    /// vcpu stopped due to an msr access.
    MsrAccess,
    /// vcpu stopped due to a cpuid request.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    Cpuid {
        entry: CpuIdEntry,
    },
    /// vcpu stopped due to calling rdtsc
    RdTsc,
    /// vcpu stopped for an apic smi trap
    ApicSmiTrap,
    /// vcpu stopped due to an apic trap
    ApicInitSipiTrap,
    /// vcpu stopped due to bus lock
    BusLock,
}
479 
/// A hypercall with parameters being made from the guest.
///
/// Passed to the handler function given to `Vcpu::handle_hyperv_hypercall`.
#[derive(Debug)]
pub enum HypervHypercall {
    /// A write to a Hyper-V SynIC (synthetic interrupt controller) MSR.
    HypervSynic {
        msr: u32,
        control: u64,
        evt_page: u64,
        msg_page: u64,
    },
    /// A Hyper-V hypercall with its input value and parameters.
    HypervHcall {
        input: u64,
        params: [u64; 2],
    },
}
494 
/// A device type to create with `Vm::create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV3,
}
507 
/// The source chip of an `IrqSource`
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum IrqSourceChip {
    PicPrimary,
    PicSecondary,
    Ioapic,
    Gic,
}

/// A source of IRQs in an `IrqRoute`.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum IrqSource {
    /// A pin on an emulated interrupt controller chip.
    Irqchip { chip: IrqSourceChip, pin: u32 },
    /// A message-signaled interrupt, described by its address and data payload.
    Msi { address: u64, data: u32 },
}

/// A single route for an IRQ.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct IrqRoute {
    /// The IRQ number (GSI) this route applies to.
    pub gsi: u32,
    /// Where the IRQ is delivered from.
    pub source: IrqSource,
}
533 
/// The state of the paravirtual clock.
///
/// Retrieved and restored with `Vm::get_pvclock` and `Vm::set_pvclock`.
#[derive(Debug, Default, Copy, Clone)]
pub struct ClockState {
    /// Current pv clock timestamp, as seen by the guest
    pub clock: u64,
    /// Hypervisor-specific feature flags for the pv clock
    pub flags: u32,
}
542 
/// The MPState represents the state of a processor.
///
/// NOTE(review): the per-variant comments describe KVM semantics (cf. KVM's `mp_state`); confirm
/// the mapping for non-KVM hypervisors.
#[repr(C)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MPState {
    /// the vcpu is currently running (x86/x86_64,arm/arm64)
    Runnable,
    /// the vcpu is an application processor (AP) which has not yet received an INIT signal
    /// (x86/x86_64)
    Uninitialized,
    /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
    InitReceived,
    /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
    Halted,
    /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
    SipiReceived,
    /// the vcpu is stopped (arm/arm64)
    Stopped,
}
561 
/// Whether the VM should be run in protected mode or not.
///
/// The `isolates_memory`, `loads_firmware`, and `runs_firmware` predicates classify these modes.
#[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum ProtectionType {
    /// The VM should be run in the unprotected mode, where the host has access to its memory.
    Unprotected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via the protected VM firmware, so that it can access its secrets.
    Protected,
    /// The VM should be run in protected mode, so the host cannot access its memory directly. It
    /// should be booted via a custom VM firmware, useful for debugging and testing.
    ProtectedWithCustomFirmware,
    /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
    /// will still be unable to access the VM memory, but it won't be given any secrets.
    ProtectedWithoutFirmware,
    /// The VM should be run in unprotected mode, but with the same memory layout as protected mode,
    /// protected VM firmware loaded, and simulating protected mode as much as possible. This is
    /// useful for debugging the protected VM firmware and other protected mode issues.
    UnprotectedWithFirmware,
}
581 
582 impl ProtectionType {
583     /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
isolates_memory(&self) -> bool584     pub fn isolates_memory(&self) -> bool {
585         matches!(
586             self,
587             Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
588         )
589     }
590 
591     /// Returns whether the VMM needs to load the pVM firmware.
loads_firmware(&self) -> bool592     pub fn loads_firmware(&self) -> bool {
593         matches!(
594             self,
595             Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
596         )
597     }
598 
599     /// Returns whether the VM runs a pVM firmware.
runs_firmware(&self) -> bool600     pub fn runs_firmware(&self) -> bool {
601         self.loads_firmware() || matches!(self, Self::Protected)
602     }
603 }
604 
/// Configuration options for creating a VM.
#[derive(Clone, Copy)]
pub struct Config {
    #[cfg(target_arch = "aarch64")]
    /// enable the Memory Tagging Extension in the guest
    pub mte: bool,
    /// Whether and how the VM's memory is protected from the host.
    pub protection_type: ProtectionType,
}
612 
613 impl Default for Config {
default() -> Config614     fn default() -> Config {
615         Config {
616             #[cfg(target_arch = "aarch64")]
617             mte: false,
618             protection_type: ProtectionType::Unprotected,
619         }
620     }
621 }
622