// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A crate for abstracting the underlying kernel hypervisor used in crosvm.
6 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
7 pub mod aarch64;
8 pub mod caps;
9 
10 #[cfg(all(
11     unix,
12     any(target_arch = "arm", target_arch = "aarch64"),
13     feature = "gunyah"
14 ))]
15 pub mod gunyah;
16 #[cfg(all(windows, feature = "haxm"))]
17 pub mod haxm;
18 #[cfg(any(target_os = "android", target_os = "linux"))]
19 pub mod kvm;
20 #[cfg(target_arch = "riscv64")]
21 pub mod riscv64;
22 #[cfg(all(windows, feature = "whpx"))]
23 pub mod whpx;
24 #[cfg(target_arch = "x86_64")]
25 pub mod x86_64;
26 
27 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
28 #[cfg(all(unix, feature = "geniezone"))]
29 pub mod geniezone;
30 
31 use base::AsRawDescriptor;
32 use base::Event;
33 use base::MappedRegion;
34 use base::Protection;
35 use base::Result;
36 use base::SafeDescriptor;
37 use serde::Deserialize;
38 use serde::Serialize;
39 use vm_memory::GuestAddress;
40 use vm_memory::GuestMemory;
41 
42 #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
43 pub use crate::aarch64::*;
44 pub use crate::caps::*;
45 #[cfg(target_arch = "riscv64")]
46 pub use crate::riscv64::*;
47 #[cfg(target_arch = "x86_64")]
48 pub use crate::x86_64::*;
49 
50 /// An index in the list of guest-mapped memory regions.
51 pub type MemSlot = u32;
52 
53 /// Range of GPA space. Starting from `guest_address` up to `size`.
54 pub struct MemRegion {
55     pub guest_address: GuestAddress,
56     pub size: u64,
57 }
58 
59 #[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq)]
60 pub enum MemCacheType {
61     CacheCoherent,
62     CacheNonCoherent,
63 }
64 
65 /// This is intended for use with virtio-balloon, where a guest driver determines unused ranges and
66 /// requests they be freed. Use without the guest's knowledge is sure to break something.
67 pub enum BalloonEvent {
68     /// Balloon event when the region is acquired from the guest. The guest cannot access this
69     /// region any more. The guest memory can be reclaimed by the host OS. As per virtio-balloon
70     /// spec, the given address and size are intended to be page-aligned.
71     Inflate(MemRegion),
72     /// Balloon event when the region is returned to the guest. VMM should reallocate memory and
73     /// register it with the hypervisor for accesses by the guest.
74     Deflate(MemRegion),
75     /// Balloon event when the requested memory size is achieved. This can be achieved through
76     /// either inflation or deflation. The `u64` will be the current size of the balloon in bytes.
77     BalloonTargetReached(u64),
78 }
79 
80 /// A trait for checking hypervisor capabilities.
81 pub trait Hypervisor: Send {
82     /// Makes a shallow clone of this `Hypervisor`.
try_clone(&self) -> Result<Self> where Self: Sized83     fn try_clone(&self) -> Result<Self>
84     where
85         Self: Sized;
86 
87     /// Checks if a particular `HypervisorCap` is available.
check_capability(&self, cap: HypervisorCap) -> bool88     fn check_capability(&self, cap: HypervisorCap) -> bool;
89 }
90 
91 /// A wrapper for using a VM and getting/setting its state.
92 pub trait Vm: Send {
93     /// Makes a shallow clone of this `Vm`.
try_clone(&self) -> Result<Self> where Self: Sized94     fn try_clone(&self) -> Result<Self>
95     where
96         Self: Sized;
97 
98     /// Checks if a particular `VmCap` is available.
99     ///
100     /// This is distinct from the `Hypervisor` version of this method because some extensions depend
101     /// on the particular `Vm` instance. This method is encouraged because it more accurately
102     /// reflects the usable capabilities.
check_capability(&self, c: VmCap) -> bool103     fn check_capability(&self, c: VmCap) -> bool;
104 
105     /// Enable the VM capabilities.
enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool>106     fn enable_capability(&self, _capability: VmCap, _flags: u32) -> Result<bool> {
107         Err(std::io::Error::from(std::io::ErrorKind::Unsupported).into())
108     }
109 
110     /// Get the guest physical address size in bits.
get_guest_phys_addr_bits(&self) -> u8111     fn get_guest_phys_addr_bits(&self) -> u8;
112 
113     /// Gets the guest-mapped memory for the Vm.
get_memory(&self) -> &GuestMemory114     fn get_memory(&self) -> &GuestMemory;
115 
116     /// Inserts the given `MappedRegion` into the VM's address space at `guest_addr`.
117     ///
118     /// The slot that was assigned the memory mapping is returned on success.  The slot can be given
119     /// to `Vm::remove_memory_region` to remove the memory from the VM's address space and take back
120     /// ownership of `mem_region`.
121     ///
122     /// Note that memory inserted into the VM's address space must not overlap with any other memory
123     /// slot's region.
124     ///
125     /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
126     /// write will trigger a mmio VM exit, leaving the memory untouched.
127     ///
128     /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
129     /// by the guest with `get_dirty_log`.
130     ///
131     /// `cache` can be used to set guest mem cache attribute if supported. Default is cache coherent
132     /// memory. Noncoherent memory means this memory might not be coherent from all access points,
133     /// e.g this could be the case when host GPU doesn't set the memory to be coherent with CPU
134     /// access. Setting this attribute would allow hypervisor to adjust guest mem control to ensure
135     /// synchronized guest access in noncoherent DMA case.
add_memory_region( &mut self, guest_addr: GuestAddress, mem_region: Box<dyn MappedRegion>, read_only: bool, log_dirty_pages: bool, cache: MemCacheType, ) -> Result<MemSlot>136     fn add_memory_region(
137         &mut self,
138         guest_addr: GuestAddress,
139         mem_region: Box<dyn MappedRegion>,
140         read_only: bool,
141         log_dirty_pages: bool,
142         cache: MemCacheType,
143     ) -> Result<MemSlot>;
144 
145     /// Does a synchronous msync of the memory mapped at `slot`, syncing `size` bytes starting at
146     /// `offset` from the start of the region.  `offset` must be page aligned.
msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>147     fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()>;
148 
149     /// Removes and drops the `UserMemoryRegion` that was previously added at the given slot.
remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>150     fn remove_memory_region(&mut self, slot: MemSlot) -> Result<Box<dyn MappedRegion>>;
151 
152     /// Creates an emulated device.
create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>153     fn create_device(&self, kind: DeviceKind) -> Result<SafeDescriptor>;
154 
155     /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
156     /// `slot`.  Only works on VMs that support `VmCap::DirtyLog`.
157     ///
158     /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
159     /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
160     /// be 2 bytes or greater.
get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>161     fn get_dirty_log(&self, slot: MemSlot, dirty_log: &mut [u8]) -> Result<()>;
162 
163     /// Registers an event to be signaled whenever a certain address is written to.
164     ///
165     /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
166     /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
167     /// and must match the expected size of the guest's write.
168     ///
169     /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
170     /// triggered is prevented.
register_ioevent( &mut self, evt: &Event, addr: IoEventAddress, datamatch: Datamatch, ) -> Result<()>171     fn register_ioevent(
172         &mut self,
173         evt: &Event,
174         addr: IoEventAddress,
175         datamatch: Datamatch,
176     ) -> Result<()>;
177 
178     /// Unregisters an event previously registered with `register_ioevent`.
179     ///
180     /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
181     /// `register_ioevent`.
unregister_ioevent( &mut self, evt: &Event, addr: IoEventAddress, datamatch: Datamatch, ) -> Result<()>182     fn unregister_ioevent(
183         &mut self,
184         evt: &Event,
185         addr: IoEventAddress,
186         datamatch: Datamatch,
187     ) -> Result<()>;
188 
189     /// Trigger any matching registered io events based on an MMIO or PIO write at `addr`. The
190     /// `data` slice represents the contents and length of the write, which is used to compare with
191     /// the registered io events' Datamatch values. If the hypervisor does in-kernel IO event
192     /// delivery, this is a no-op.
handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>193     fn handle_io_events(&self, addr: IoEventAddress, data: &[u8]) -> Result<()>;
194 
195     /// Retrieves the current timestamp of the paravirtual clock as seen by the current guest.
196     /// Only works on VMs that support `VmCap::PvClock`.
get_pvclock(&self) -> Result<ClockState>197     fn get_pvclock(&self) -> Result<ClockState>;
198 
199     /// Sets the current timestamp of the paravirtual clock as seen by the current guest.
200     /// Only works on VMs that support `VmCap::PvClock`.
set_pvclock(&self, state: &ClockState) -> Result<()>201     fn set_pvclock(&self, state: &ClockState) -> Result<()>;
202 
203     /// Maps `size` bytes starting at `fs_offset` bytes from within the given `fd`
204     /// at `offset` bytes from the start of the arena with `prot` protections.
205     /// `offset` must be page aligned.
206     ///
207     /// # Arguments
208     /// * `offset` - Page aligned offset into the arena in bytes.
209     /// * `size` - Size of memory region in bytes.
210     /// * `fd` - File descriptor to mmap from.
211     /// * `fd_offset` - Offset in bytes from the beginning of `fd` to start the mmap.
212     /// * `prot` - Protection (e.g. readable/writable) of the memory region.
add_fd_mapping( &mut self, slot: u32, offset: usize, size: usize, fd: &dyn AsRawDescriptor, fd_offset: u64, prot: Protection, ) -> Result<()>213     fn add_fd_mapping(
214         &mut self,
215         slot: u32,
216         offset: usize,
217         size: usize,
218         fd: &dyn AsRawDescriptor,
219         fd_offset: u64,
220         prot: Protection,
221     ) -> Result<()>;
222 
223     /// Remove `size`-byte mapping starting at `offset`.
remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>224     fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()>;
225 
226     /// Events from virtio-balloon that affect the state for guest memory and host memory.
handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>227     fn handle_balloon_event(&mut self, event: BalloonEvent) -> Result<()>;
228 }
229 
/// Operation for Io and Mmio
#[derive(Copy, Clone, Debug)]
pub enum IoOperation {
    /// A read; the handler supplies the data to return to the guest.
    Read,
    Write {
        /// Data to be written.
        ///
        /// For 64 bit architecture, Mmio and Io only work with at most 8 bytes of data.
        data: [u8; 8],
    },
}
242 /// Parameters describing an MMIO or PIO from the guest.
243 #[derive(Copy, Clone, Debug)]
244 pub struct IoParams {
245     pub address: u64,
246     pub size: usize,
247     pub operation: IoOperation,
248 }
249 
250 /// Handle to a virtual CPU that may be used to request a VM exit from within a signal handler.
251 #[cfg(any(target_os = "android", target_os = "linux"))]
252 pub struct VcpuSignalHandle {
253     inner: Box<dyn VcpuSignalHandleInner>,
254 }
255 
256 #[cfg(any(target_os = "android", target_os = "linux"))]
257 impl VcpuSignalHandle {
258     /// Request an immediate exit for this VCPU.
259     ///
260     /// This function is safe to call from a signal handler.
signal_immediate_exit(&self)261     pub fn signal_immediate_exit(&self) {
262         self.inner.signal_immediate_exit()
263     }
264 }
265 
/// Signal-safe mechanism for requesting an immediate VCPU exit.
///
/// Each hypervisor backend must implement this for its VCPU type.
#[cfg(any(target_os = "android", target_os = "linux"))]
pub(crate) trait VcpuSignalHandleInner {
    /// Signal the associated VCPU to exit if it is currently running.
    ///
    /// # Safety
    ///
    /// The implementation of this function must be async signal safe.
    /// <https://man7.org/linux/man-pages/man7/signal-safety.7.html>
    fn signal_immediate_exit(&self);
}
280 /// A virtual CPU holding a virtualized hardware thread's state, such as registers and interrupt
281 /// state, which may be used to execute virtual machines.
282 pub trait Vcpu: downcast_rs::DowncastSync {
283     /// Makes a shallow clone of this `Vcpu`.
try_clone(&self) -> Result<Self> where Self: Sized284     fn try_clone(&self) -> Result<Self>
285     where
286         Self: Sized;
287 
288     /// Casts this architecture specific trait object to the base trait object `Vcpu`.
as_vcpu(&self) -> &dyn Vcpu289     fn as_vcpu(&self) -> &dyn Vcpu;
290 
291     /// Runs the VCPU until it exits, returning the reason for the exit.
run(&mut self) -> Result<VcpuExit>292     fn run(&mut self) -> Result<VcpuExit>;
293 
294     /// Returns the vcpu id.
id(&self) -> usize295     fn id(&self) -> usize;
296 
297     /// Sets the bit that requests an immediate exit.
set_immediate_exit(&self, exit: bool)298     fn set_immediate_exit(&self, exit: bool);
299 
300     /// Returns a handle that can be used to cause this VCPU to exit from `run()` from a signal
301     /// handler.
302     #[cfg(any(target_os = "android", target_os = "linux"))]
signal_handle(&self) -> VcpuSignalHandle303     fn signal_handle(&self) -> VcpuSignalHandle;
304 
305     /// Handles an incoming MMIO request from the guest.
306     ///
307     /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`, and in the same
308     /// thread as run().
309     ///
310     /// Once called, it will determine whether a MMIO read or MMIO write was the reason for the MMIO
311     /// exit, call `handle_fn` with the respective IoParams to perform the MMIO read or write, and
312     /// set the return data in the vcpu so that the vcpu can resume running.
handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>313     fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>;
314 
315     /// Handles an incoming PIO from the guest.
316     ///
317     /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`, and in the same
318     /// thread as run().
319     ///
320     /// Once called, it will determine whether an input or output was the reason for the Io exit,
321     /// call `handle_fn` with the respective IoParams to perform the input/output operation, and set
322     /// the return data in the vcpu so that the vcpu can resume running.
handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>323     fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()>;
324 
325     /// Handles the HYPERV_HYPERCALL exit from a vcpu.
326     ///
327     /// This function should be called after `Vcpu::run` returns `VcpuExit::HypervHcall`, and in the
328     /// same thread as run.
329     ///
330     /// Once called, it will parse the appropriate input parameters to the provided function to
331     /// handle the hyperv call, and then set the return data into the vcpu so it can resume running.
handle_hyperv_hypercall(&self, func: &mut dyn FnMut(HypervHypercall) -> u64) -> Result<()>332     fn handle_hyperv_hypercall(&self, func: &mut dyn FnMut(HypervHypercall) -> u64) -> Result<()>;
333 
334     /// Handles a RDMSR exit from the guest.
335     ///
336     /// This function should be called after `Vcpu::run` returns `VcpuExit::RdMsr`,
337     /// and in the same thread as run.
338     ///
339     /// It will put `data` into the guest buffer and return.
handle_rdmsr(&self, data: u64) -> Result<()>340     fn handle_rdmsr(&self, data: u64) -> Result<()>;
341 
342     /// Handles a WRMSR exit from the guest by removing any error indication for the operation.
343     ///
344     /// This function should be called after `Vcpu::run` returns `VcpuExit::WrMsr`,
345     /// and in the same thread as run.
handle_wrmsr(&self)346     fn handle_wrmsr(&self);
347 
348     /// Signals to the hypervisor that this Vcpu is being paused by userspace.
on_suspend(&self) -> Result<()>349     fn on_suspend(&self) -> Result<()>;
350 
351     /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
352     /// hypervisor API (e.g., kvm.h).  `args` are the arguments for enabling the feature, if any.
353     ///
354     /// # Safety
355     /// This function is marked as unsafe because `args` may be interpreted as pointers for some
356     /// capabilities. The caller must ensure that any pointers passed in the `args` array are
357     /// allocated as the kernel expects, and that mutable pointers are owned.
enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>358     unsafe fn enable_raw_capability(&self, cap: u32, args: &[u64; 4]) -> Result<()>;
359 }
360 
361 downcast_rs::impl_downcast!(sync Vcpu);
362 
363 /// An address either in programmable I/O space or in memory mapped I/O space.
364 #[derive(Copy, Clone, Debug, Serialize, Deserialize, PartialEq, Eq, std::hash::Hash)]
365 pub enum IoEventAddress {
366     Pio(u64),
367     Mmio(u64),
368 }
369 
370 /// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
371 #[derive(Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
372 pub enum Datamatch {
373     AnyLength,
374     U8(Option<u8>),
375     U16(Option<u16>),
376     U32(Option<u32>),
377     U64(Option<u64>),
378 }
379 
380 /// A reason why a VCPU exited. One of these returns every time `Vcpu::run` is called.
381 #[derive(Debug, Clone, Copy)]
382 pub enum VcpuExit {
383     /// An io instruction needs to be emulated.
384     /// vcpu handle_io should be called to handle the io operation
385     Io,
386     /// A mmio instruction needs to be emulated.
387     /// vcpu handle_mmio should be called to handle the mmio operation
388     Mmio,
389     IoapicEoi {
390         vector: u8,
391     },
392     HypervHypercall,
393     Unknown,
394     Exception,
395     Hypercall,
396     Debug,
397     Hlt,
398     IrqWindowOpen,
399     Shutdown,
400     FailEntry {
401         hardware_entry_failure_reason: u64,
402     },
403     Intr,
404     SetTpr,
405     TprAccess,
406     S390Sieic,
407     S390Reset,
408     Dcr,
409     Nmi,
410     InternalError,
411     Osi,
412     PaprHcall,
413     S390Ucontrol,
414     Watchdog,
415     S390Tsch,
416     Epr,
417     SystemEventShutdown,
418     SystemEventReset,
419     SystemEventCrash,
420     RdMsr {
421         index: u32,
422     },
423     WrMsr {
424         index: u32,
425         data: u64,
426     },
427     /// An invalid vcpu register was set while running.
428     InvalidVpRegister,
429     /// incorrect setup for vcpu requiring an unsupported feature
430     UnsupportedFeature,
431     /// vcpu run was user cancelled
432     Canceled,
433     /// an unrecoverable exception was encountered (different from Exception)
434     UnrecoverableException,
435     /// vcpu stopped due to an msr access.
436     MsrAccess,
437     /// vcpu stopped due to a cpuid request.
438     #[cfg(target_arch = "x86_64")]
439     Cpuid {
440         entry: CpuIdEntry,
441     },
442     /// vcpu stopped due to calling rdtsc
443     RdTsc,
444     /// vcpu stopped for an apic smi trap
445     ApicSmiTrap,
446     /// vcpu stopped due to an apic trap
447     ApicInitSipiTrap,
448     /// vcpu stoppted due to bus lock
449     BusLock,
450     /// Riscv supervisor call.
451     Sbi {
452         extension_id: u64,
453         function_id: u64,
454         args: [u64; 6],
455     },
456     /// Emulate CSR access from guest.
457     RiscvCsr {
458         csr_num: u64,
459         new_value: u64,
460         write_mask: u64,
461         ret_value: u64,
462     },
463 }
464 
/// A hypercall with parameters being made from the guest.
#[derive(Debug)]
pub enum HypervHypercall {
    /// A Hyper-V SynIC (synthetic interrupt controller) MSR access.
    HypervSynic {
        msr: u32,
        control: u64,
        evt_page: u64,
        msg_page: u64,
    },
    /// A generic Hyper-V hypercall with its input value and parameters.
    HypervHcall {
        input: u64,
        params: [u64; 2],
    },
}
/// A device type to create with `Vm.create_device`.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum DeviceKind {
    /// VFIO device for direct access to devices from userspace
    Vfio,
    /// ARM virtual general interrupt controller v2
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV2,
    /// ARM virtual general interrupt controller v3
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    ArmVgicV3,
    /// RiscV AIA in-kernel emulation
    #[cfg(target_arch = "riscv64")]
    RiscvAia,
}
496 /// The source chip of an `IrqSource`
497 #[repr(C)]
498 #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
499 pub enum IrqSourceChip {
500     PicPrimary,
501     PicSecondary,
502     Ioapic,
503     Gic,
504     Aia,
505 }
506 
507 /// A source of IRQs in an `IrqRoute`.
508 #[repr(C)]
509 #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
510 pub enum IrqSource {
511     Irqchip { chip: IrqSourceChip, pin: u32 },
512     Msi { address: u64, data: u32 },
513 }
514 
515 /// A single route for an IRQ.
516 #[repr(C)]
517 #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
518 pub struct IrqRoute {
519     pub gsi: u32,
520     pub source: IrqSource,
521 }
522 
523 /// The state of the paravirtual clock.
524 #[derive(Debug, Default, Copy, Clone, Serialize, Deserialize)]
525 pub struct ClockState {
526     /// Current pv clock timestamp, as seen by the guest
527     pub clock: u64,
528 }
529 
530 /// The MPState represents the state of a processor.
531 #[repr(C)]
532 #[derive(Clone, Copy, Debug, PartialEq, Eq, Serialize, Deserialize)]
533 pub enum MPState {
534     /// the vcpu is currently running (x86/x86_64,arm/arm64)
535     Runnable,
536     /// the vcpu is an application processor (AP) which has not yet received an INIT signal
537     /// (x86/x86_64)
538     Uninitialized,
539     /// the vcpu has received an INIT signal, and is now ready for a SIPI (x86/x86_64)
540     InitReceived,
541     /// the vcpu has executed a HLT instruction and is waiting for an interrupt (x86/x86_64)
542     Halted,
543     /// the vcpu has just received a SIPI (vector accessible via KVM_GET_VCPU_EVENTS) (x86/x86_64)
544     SipiReceived,
545     /// the vcpu is stopped (arm/arm64)
546     Stopped,
547 }
548 
549 /// Whether the VM should be run in protected mode or not.
550 #[derive(Copy, Clone, Debug, Deserialize, Eq, PartialEq, Serialize)]
551 pub enum ProtectionType {
552     /// The VM should be run in the unprotected mode, where the host has access to its memory.
553     Unprotected,
554     /// The VM should be run in protected mode, so the host cannot access its memory directly. It
555     /// should be booted via the protected VM firmware, so that it can access its secrets.
556     Protected,
557     /// The VM should be run in protected mode, so the host cannot access its memory directly. It
558     /// should be booted via a custom VM firmware, useful for debugging and testing.
559     ProtectedWithCustomFirmware,
560     /// The VM should be run in protected mode, but booted directly without pVM firmware. The host
561     /// will still be unable to access the VM memory, but it won't be given any secrets.
562     ProtectedWithoutFirmware,
563     /// The VM should be run in unprotected mode, but with the same memory layout as protected
564     /// mode, protected VM firmware loaded, and simulating protected mode as much as possible.
565     /// This is useful for debugging the protected VM firmware and other protected mode issues.
566     UnprotectedWithFirmware,
567 }
568 
569 impl ProtectionType {
570     /// Returns whether the hypervisor will prevent us from accessing the VM's memory.
isolates_memory(&self) -> bool571     pub fn isolates_memory(&self) -> bool {
572         matches!(
573             self,
574             Self::Protected | Self::ProtectedWithCustomFirmware | Self::ProtectedWithoutFirmware
575         )
576     }
577 
578     /// Returns whether the VMM needs to load the pVM firmware.
loads_firmware(&self) -> bool579     pub fn loads_firmware(&self) -> bool {
580         matches!(
581             self,
582             Self::UnprotectedWithFirmware | Self::ProtectedWithCustomFirmware
583         )
584     }
585 
586     /// Returns whether the VM runs a pVM firmware.
runs_firmware(&self) -> bool587     pub fn runs_firmware(&self) -> bool {
588         self.loads_firmware() || matches!(self, Self::Protected)
589     }
590 }
591 
592 #[derive(Clone, Copy)]
593 pub struct Config {
594     #[cfg(target_arch = "aarch64")]
595     /// enable the Memory Tagging Extension in the guest
596     pub mte: bool,
597     pub protection_type: ProtectionType,
598 }
599 
600 impl Default for Config {
default() -> Config601     fn default() -> Config {
602         Config {
603             #[cfg(target_arch = "aarch64")]
604             mte: false,
605             protection_type: ProtectionType::Unprotected,
606         }
607     }
608 }
609