// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::arch::x86_64::CpuidResult;
use std::collections::BTreeMap;

use base::errno_result;
use base::error;
use base::ioctl;
use base::ioctl_with_mut_ptr;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ptr;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::AsRawDescriptor;
use base::Error;
use base::IoctlNr;
use base::MappedRegion;
use base::Result;
use data_model::vec_with_array_field;
use data_model::FlexibleArrayWrapper;
use kvm_sys::*;
use libc::E2BIG;
use libc::EAGAIN;
use libc::EINVAL;
use libc::EIO;
use libc::ENOMEM;
use libc::ENXIO;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use vm_memory::GuestAddress;

use super::Config;
use super::Kvm;
use super::KvmCap;
use super::KvmVcpu;
use super::KvmVm;
use crate::host_phys_addr_bits;
use crate::ClockState;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
use crate::DescriptorTable;
use crate::DeviceKind;
use crate::Fpu;
use crate::FpuReg;
use crate::HypervisorX86_64;
use crate::IoapicRedirectionTableEntry;
use crate::IoapicState;
use crate::IrqSourceChip;
use crate::LapicState;
use crate::PicSelect;
use crate::PicState;
use crate::PitChannelState;
use crate::PitState;
use crate::ProtectionType;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
use crate::VcpuExit;
use crate::VcpuX86_64;
use crate::VmCap;
use crate::VmX86_64;
use crate::Xsave;
use crate::NUM_IOAPIC_PINS;

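// `FlexibleArrayWrapper` manages the variable-length `entries` array that trails the
// fixed-size `kvm_cpuid2` header, keeping the allocation size and the `nent` field in sync.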
type KvmCpuId = FlexibleArrayWrapper<kvm_cpuid2, kvm_cpuid_entry2>;
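// The legacy `struct kvm_xsave` is 4096 bytes. Larger save areas are reported via the
// KVM_CAP_XSAVE2 extension and fetched with KVM_GET_XSAVE2 (see `xsave_size` below).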
const KVM_XSAVE_MAX_SIZE: usize = 4096;
const MSR_IA32_APICBASE: u32 = 0x0000001b;

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    pub exception_payload: Option<u64>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    pub nr: u8,
    pub has_error_code: bool,
    pub pending: Option<bool>,
    pub error_code: u32,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    pub nr: u8,
    pub soft: bool,
    pub shadow: Option<u8>,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    pub pending: Option<bool>,
    pub masked: bool,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    pub pending: Option<bool>,
}

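/// Queries cpuid-style information through `kind`, retrying with a larger buffer as needed.
///
/// The buffer starts with room for `initial_capacity` entries; each time the kernel returns
/// `E2BIG`, the capacity is doubled until the ioctl succeeds or the doubling overflows.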
pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
    descriptor: &T,
    kind: IoctlNr,
    initial_capacity: usize,
) -> Result<CpuId> {
    let mut entries: usize = initial_capacity;

    loop {
        let mut kvm_cpuid = KvmCpuId::new(entries);

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
            // memory allocated for the struct. The limit is read from nent within KvmCpuId,
            // which is set to the allocated size above.
            unsafe { ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) }
        };
        if ret < 0 {
            let err = Error::last();
            match err.errno() {
                E2BIG => {
                    // Double the available memory for cpuid entries for kvm.
                    if let Some(val) = entries.checked_mul(2) {
                        entries = val;
                    } else {
                        return Err(err);
                    }
                }
                _ => return Err(err),
            }
        } else {
            return Ok(CpuId::from(&kvm_cpuid));
        }
    }
}

impl Kvm {
    pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
    }

    pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
        if protection_type.isolates_memory() {
            Ok(KVM_X86_PKVM_PROTECTED_VM)
        } else {
            Ok(KVM_X86_DEFAULT_VM)
        }
    }

    /// Get the size of guest physical addresses in bits.
    pub fn get_guest_phys_addr_bits(&self) -> u8 {
        // Assume the guest physical address size is the same as the host.
        host_phys_addr_bits()
    }
}

impl HypervisorX86_64 for Kvm {
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID)
    }

    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
            // memory allocated for the struct. The limit is read from nmsrs, which is set to
            // the allocated size (MAX_KVM_MSR_ENTRIES) above.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST, &mut msr_list[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list[0].nmsrs;

        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        let indices: &[u32] = unsafe {
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}

impl KvmVm {
    /// Does platform specific initialization for the KvmVm.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Whether running under pKVM.
    pub fn is_pkvm(&self) -> bool {
        false
    }

    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// Returns the params to pass to KVM_CREATE_DEVICE for a `kind` device on this arch, or
    /// None to let the arch-independent `KvmVm::create_device` handle it.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK, &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Arch-specific implementation of `Vm::set_pvclock`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK, &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the given interrupt controller by issuing the KVM_GET_IRQCHIP
    /// ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write the
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not the IOAPIC.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of the given interrupt controller by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the number of pins for the emulated IO-APIC.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Retrieves the state of the IOAPIC by issuing the KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write the
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP, &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not the PIC.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of the IOAPIC by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP, &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2, &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the PIT by issuing the KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2, &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of the PIT by issuing the KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2, pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Set MSR_PLATFORM_INFO read access.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know that the
        // kernel will only read the correct amount of memory from our pointer, and
        // we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enable support for split-irqchip.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP, &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Get pKVM hypervisor details, e.g. the firmware size.
    ///
    /// Returns `Err` if not running under pKVM.
    ///
    /// Uses `KVM_ENABLE_CAP` internally, but only as a getter; there should be no side effects
    /// in KVM.
    fn get_protected_vm_info(&self) -> Result<KvmProtectedVmInfo> {
        let mut info = KvmProtectedVmInfo {
            firmware_size: 0,
            reserved: [0; 7],
        };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel won't write beyond the
        // end of the struct or keep a pointer to it.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO,
                &[&mut info as *mut KvmProtectedVmInfo as u64, 0, 0, 0],
            )
        }?;
        Ok(info)
    }

    fn set_protected_vm_firmware_gpa(&self, fw_addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because none of the args are pointers.
        unsafe {
            self.enable_raw_capability(
                KvmCap::X86ProtectedVm,
                KVM_CAP_X86_PROTECTED_VM_FLAGS_SET_FW_GPA,
                &[fw_addr.0, 0, 0, 0],
            )
        }
    }
}

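// Assumed to mirror the layout the kernel writes for the KVM_CAP_X86_PROTECTED_VM_FLAGS_INFO
// query above: the firmware size followed by seven reserved u64s.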
#[repr(C)]
struct KvmProtectedVmInfo {
    firmware_size: u64,
    reserved: [u64; 7],
}

impl VmX86_64 for KvmVm {
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.kvm
    }

    fn load_protected_vm_firmware(
        &mut self,
        fw_addr: GuestAddress,
        fw_max_size: u64,
    ) -> Result<()> {
        let info = self.get_protected_vm_info()?;
        if info.firmware_size == 0 {
            Err(Error::new(EINVAL))
        } else {
            if info.firmware_size > fw_max_size {
                return Err(Error::new(ENOMEM));
            }
            self.set_protected_vm_firmware_gpa(fw_addr)
        }
    }

    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // create_vcpu is declared separately in VmAArch64 and VmX86_64, so it can return
        // VcpuAArch64 or VcpuX86_64. But both use the same implementation in
        // KvmVm::create_kvm_vcpu.
        Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
    }

    /// Sets the address of the three-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
    fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR, addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the address of a one-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
    fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR, &addr.offset()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}

impl KvmVcpu {
    /// Handles a `KVM_EXIT_SYSTEM_EVENT` with event type `KVM_SYSTEM_EVENT_RESET` with the given
    /// event flags and returns the appropriate `VcpuExit` value for the run loop to handle.
    pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
        Ok(VcpuExit::SystemEventReset)
    }

    /// Gets the Xsave size by checking the KVM_CAP_XSAVE2 extension.
    ///
    /// A negative return value from the ioctl indicates an error. A non-negative value of at
    /// most 4096 means XSAVE2 is not supported by the CPU or the kernel, and
    /// KVM_XSAVE_MAX_SIZE (4096) is returned. Otherwise, the reported size is returned.
    fn xsave_size(&self) -> Result<usize> {
        let size = {
            // SAFETY:
            // Safe because we know that our file is a valid VM fd.
            unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION, KVM_CAP_XSAVE2 as u64) }
        };
        if size < 0 {
            return errno_result();
        }
        // Safe to unwrap since we already tested for negative values.
        let size: usize = size.try_into().unwrap();
        Ok(size.max(KVM_XSAVE_MAX_SIZE))
    }

    #[inline]
    pub(crate) fn handle_vm_exit_arch(&self, run: &mut kvm_run) -> Option<VcpuExit> {
        match run.exit_reason {
            KVM_EXIT_IO => Some(VcpuExit::Io),
            KVM_EXIT_IOAPIC_EOI => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let vector = unsafe { run.__bindgen_anon_1.eoi.vector };
                Some(VcpuExit::IoapicEoi { vector })
            }
            KVM_EXIT_HLT => Some(VcpuExit::Hlt),
            KVM_EXIT_SET_TPR => Some(VcpuExit::SetTpr),
            KVM_EXIT_TPR_ACCESS => Some(VcpuExit::TprAccess),
            KVM_EXIT_X86_BUS_LOCK => Some(VcpuExit::BusLock),
            _ => None,
        }
    }
}

impl VcpuX86_64 for KvmVcpu {
    #[allow(clippy::cast_ptr_alignment)]
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.request_interrupt_window = requested.into();
    }

    #[allow(clippy::cast_ptr_alignment)]
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.ready_for_interrupt_injection != 0 && run.if_flag != 0
    }

    /// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
    ///
    /// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
    /// ChromeOS doesn't support PPC or MIPS.
    fn interrupt(&self, irq: u8) -> Result<()> {
        if !self.ready_for_interrupt() {
            return Err(Error::new(EAGAIN));
        }

        let interrupt = kvm_interrupt { irq: irq.into() };
        // SAFETY:
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT, &interrupt) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn inject_nmi(&self) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VCPU fd.
        let ret = unsafe { ioctl(self, KVM_NMI) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_regs(&self) -> Result<Regs> {
        let mut regs: kvm_regs = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS, &mut regs) }
        };
        if ret == 0 {
            Ok(Regs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let regs = kvm_regs::from(regs);
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_ref(self, KVM_SET_REGS, &regs) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_sregs(&self) -> Result<Sregs> {
        let mut regs: kvm_sregs = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) }
        };
        if ret == 0 {
            Ok(Sregs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        // Get the current `kvm_sregs` so we can use its `apic_base` and `interrupt_bitmap`,
        // which are not present in `Sregs`.
        let mut kvm_sregs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut kvm_sregs) };
        if ret != 0 {
            return errno_result();
        }

        kvm_sregs.cs = kvm_segment::from(&sregs.cs);
        kvm_sregs.ds = kvm_segment::from(&sregs.ds);
        kvm_sregs.es = kvm_segment::from(&sregs.es);
        kvm_sregs.fs = kvm_segment::from(&sregs.fs);
        kvm_sregs.gs = kvm_segment::from(&sregs.gs);
        kvm_sregs.ss = kvm_segment::from(&sregs.ss);
        kvm_sregs.tr = kvm_segment::from(&sregs.tr);
        kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
        kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
        kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
        kvm_sregs.cr0 = sregs.cr0;
        kvm_sregs.cr2 = sregs.cr2;
        kvm_sregs.cr3 = sregs.cr3;
        kvm_sregs.cr4 = sregs.cr4;
        kvm_sregs.cr8 = sregs.cr8;
        kvm_sregs.efer = sregs.efer;

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &kvm_sregs) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_fpu(&self) -> Result<Fpu> {
        let mut fpu: kvm_fpu = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU, &mut fpu) };
        if ret == 0 {
            Ok(Fpu::from(&fpu))
        } else {
            errno_result()
        }
    }

    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let fpu = kvm_fpu::from(fpu);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_fpu struct.
            unsafe { ioctl_with_ref(self, KVM_SET_FPU, &fpu) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// If the kernel reports XSAVE2 support, KVM_GET_XSAVE2 is used instead of KVM_GET_XSAVE.
    fn get_xsave(&self) -> Result<Xsave> {
        let size = self.xsave_size()?;
        let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
            KVM_GET_XSAVE2
        } else {
            KVM_GET_XSAVE
        };
        let mut xsave = Xsave::new(size);

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
        if ret == 0 {
            Ok(xsave)
        } else {
            errno_result()
        }
    }

    fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
        let size = self.xsave_size()?;
        // Ensure xsave is the same size as used in get_xsave. If the sizes don't match, the
        // same set of extensions is not enabled for the CPU, so return an error.
        if xsave.len() != size {
            return Err(Error::new(EIO));
        }

        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read
        // the correct amount of memory from our pointer, and we verify the return result.
        // Because of the len check above, and because the layout of `struct kvm_xsave` is
        // compatible with a slice of `u32`, we can pass the pointer to `xsave` directly.
        let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE, xsave.as_ptr()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_interrupt_state(&self) -> Result<AnySnapshot> {
        let mut vcpu_evts: kvm_vcpu_events = Default::default();
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // write the correct amount of memory to our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS, &mut vcpu_evts) }
        };
        if ret == 0 {
            Ok(
                AnySnapshot::to_any(VcpuEvents::from(&vcpu_evts)).map_err(|e| {
                    error!("failed to serialize vcpu_events: {:?}", e);
                    Error::new(EIO)
                })?,
            )
        } else {
            errno_result()
        }
    }

    fn set_interrupt_state(&self, data: AnySnapshot) -> Result<()> {
        let vcpu_events =
            kvm_vcpu_events::from(&AnySnapshot::from_any::<VcpuEvents>(data).map_err(|e| {
                error!("failed to deserialize vcpu_events: {:?}", e);
                Error::new(EIO)
            })?);
        let ret = {
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS, &vcpu_events) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_debugregs(&self) -> Result<DebugRegs> {
        let mut regs: kvm_debugregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS, &mut regs) };
        if ret == 0 {
            Ok(DebugRegs::from(&regs))
        } else {
            errno_result()
        }
    }

    fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
        let dregs = kvm_debugregs::from(dregs);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_debugregs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS, &dregs) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
        let mut regs: kvm_xcrs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS, &mut regs) };
        if ret < 0 {
            return errno_result();
        }

        Ok(regs
            .xcrs
            .iter()
            .take(regs.nr_xcrs as usize)
            .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
            .collect())
    }

    fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
        let mut kvm_xcr = kvm_xcrs {
            nr_xcrs: 1,
            ..Default::default()
        };
        kvm_xcr.xcrs[0].xcr = xcr_index;
        kvm_xcr.xcrs[0].value = value;

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_xcrs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_XCRS, &kvm_xcr) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        msrs[0].nmsrs = 1;

        // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
        unsafe {
            let msr_entries = msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs
            // struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        if ret != 1 {
            return Err(base::Error::new(libc::ENOENT));
        }

        // SAFETY:
        // Safe because we trust the kernel to return the correct array length on success.
        let value = unsafe {
            let msr_entries = msrs[0].entries.as_slice(1);
            msr_entries[0].data
        };

        Ok(value)
    }

    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        let msr_index_list = self.kvm.get_msr_index_list()?;
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_index_list.len());
        kvm_msrs[0].nmsrs = msr_index_list.len() as u32;
        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known.
        // Providing the length used to create the struct guarantees the entire slice is valid.
        unsafe {
            kvm_msrs[0]
                .entries
                .as_mut_slice(msr_index_list.len())
                .iter_mut()
                .zip(msr_index_list.iter())
                .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs
            // struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSRS, &mut kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        let count = ret as usize;
        if count != msr_index_list.len() {
            error!(
                "failed to get all MSRs: requested {}, got {}",
                msr_index_list.len(),
                count,
            );
            return Err(base::Error::new(libc::EPERM));
        }

        // SAFETY:
        // Safe because we trust the kernel to return the correct array length on success.
        let msrs = unsafe {
            BTreeMap::from_iter(
                kvm_msrs[0]
                    .entries
                    .as_slice(count)
                    .iter()
                    .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
            )
        };

        Ok(msrs)
    }

    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
        kvm_msrs[0].nmsrs = 1;

        // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
        unsafe {
            let msr_entries = kvm_msrs[0].entries.as_mut_slice(1);
            msr_entries[0].index = msr_index;
            msr_entries[0].data = value;
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_msrs struct.
            unsafe { ioctl_with_ref(self, KVM_SET_MSRS, &kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_SET_MSRS returns the number of msr entries written.
        if ret != 1 {
            error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
            return Err(base::Error::new(libc::EPERM));
        }

        Ok(())
    }

    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        let cpuid = KvmCpuId::from(cpuid);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_cpuid2 struct.
            unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2, cpuid.as_ptr()) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

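    /// Programs up to four hardware breakpoints and, optionally, single-step mode via
    /// KVM_SET_GUEST_DEBUG.
    ///
    /// A minimal usage sketch (the guest address is illustrative only):
    ///
    /// ```ignore
    /// vcpu.set_guest_debug(&[GuestAddress(0x1000)], /* enable_singlestep= */ false)?;
    /// ```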
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        use kvm_sys::*;
        let mut dbg: kvm_guest_debug = Default::default();

        if addrs.len() > 4 {
            error!(
                "At most 4 breakpoints are supported, but {} addresses were passed",
                addrs.len()
            );
            return Err(base::Error::new(libc::EINVAL));
        }

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }

        // Set bits 9 and 10.
        // bit 9: GE (global exact breakpoint enable) flag.
        // bit 10: always 1.
        dbg.arch.debugreg[7] = 0x0600;

        for (i, addr) in addrs.iter().enumerate() {
            dbg.arch.debugreg[i] = addr.0;
            // Set the global breakpoint enable flag for this breakpoint.
            dbg.arch.debugreg[7] |= 2 << (i * 2);
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_guest_debug struct.
            unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG, &dbg) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// KVM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
        // On KVM, the TSC MSR is restored as part of SET_MSRS, and no further action is required.
        Ok(())
    }
}

impl KvmVcpu {
    /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_GET_LAPIC.
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = {
            // SAFETY:
            // The ioctl is unsafe unless you trust the kernel not to write past the end of the
            // local_apic struct.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC, &mut klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt
    /// Controller".
    ///
    /// See the documentation for KVM_SET_LAPIC.
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = {
            // SAFETY:
            // The ioctl is safe because the kernel will only read from the klapic struct.
            unsafe { ioctl_with_ref(self, KVM_SET_LAPIC, klapic) }
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to get the value of the APIC_BASE MSR.
    ///
    /// See the documentation for the kvm_run structure, and for KVM_GET_LAPIC.
    pub fn get_apic_base(&self) -> Result<u64> {
        self.get_msr(MSR_IA32_APICBASE)
    }

    /// X86 specific call to set the value of the APIC_BASE MSR.
    ///
    /// See the documentation for the kvm_run structure, and for KVM_SET_LAPIC.
    pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
        self.set_msr(MSR_IA32_APICBASE, apic_base)
    }

    /// Call to get pending interrupts acknowledged by the APIC but not yet injected into the
    /// CPU.
    ///
    /// See the documentation for KVM_GET_SREGS.
    pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            Ok(regs.interrupt_bitmap)
        } else {
            errno_result()
        }
    }

    /// Call to set pending interrupts acknowledged by the APIC but not yet injected into the
    /// CPU.
    ///
    /// See the documentation for KVM_SET_SREGS.
    pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
        // Potentially racy code. Vcpu registers are set in a separate thread and this could
        // result in Sregs being modified from the Vcpu initialization thread and the Irq
        // restoring thread.
        let mut regs: kvm_sregs = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS, &mut regs) };
        if ret >= 0 {
            regs.interrupt_bitmap = interrupt_bitmap;
            // SAFETY:
            // Safe because we know that our file is a VCPU fd, we know the kernel will only
            // read the correct amount of memory from our pointer, and we verify the return
            // result.
            let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS, &regs) };
            if ret >= 0 {
                Ok(())
            } else {
                errno_result()
            }
        } else {
            errno_result()
        }
    }
}

impl<'a> From<&'a KvmCpuId> for CpuId {
    fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
        let kvm_entries = kvm_cpuid.entries_slice();
        let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());

        for entry in kvm_entries {
            let cpu_id_entry = CpuIdEntry {
                function: entry.function,
                index: entry.index,
                flags: entry.flags,
                cpuid: CpuidResult {
                    eax: entry.eax,
                    ebx: entry.ebx,
                    ecx: entry.ecx,
                    edx: entry.edx,
                },
            };
            cpu_id_entries.push(cpu_id_entry)
        }
        CpuId { cpu_id_entries }
    }
}

impl From<&CpuId> for KvmCpuId {
    fn from(cpuid: &CpuId) -> KvmCpuId {
        let mut kvm = KvmCpuId::new(cpuid.cpu_id_entries.len());
        let entries = kvm.mut_entries_slice();
        for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
            entries[i] = kvm_cpuid_entry2 {
                function: e.function,
                index: e.index,
                flags: e.flags,
                eax: e.cpuid.eax,
                ebx: e.cpuid.ebx,
                ecx: e.cpuid.ecx,
                edx: e.cpuid.edx,
                ..Default::default()
            };
        }
        kvm
    }
}

impl From<&ClockState> for kvm_clock_data {
    fn from(state: &ClockState) -> Self {
        kvm_clock_data {
            clock: state.clock,
            ..Default::default()
        }
    }
}

impl From<&kvm_clock_data> for ClockState {
    fn from(clock_data: &kvm_clock_data) -> Self {
        ClockState {
            clock: clock_data.clock,
        }
    }
}

impl From<&kvm_pic_state> for PicState {
    fn from(item: &kvm_pic_state) -> Self {
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}

impl From<&PicState> for kvm_pic_state {
    fn from(item: &PicState) -> Self {
        kvm_pic_state {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select as u8,
            poll: item.poll as u8,
            special_mask: item.special_mask as u8,
            init_state: item.init_state as u8,
            auto_eoi: item.auto_eoi as u8,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
            special_fully_nested_mode: item.special_fully_nested_mode as u8,
            init4: item.use_4_byte_icw as u8,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}

impl From<&kvm_ioapic_state> for IoapicState {
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}

impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // IoapicRedirectionTableEntry layout matches the exact bit layout of a hardware
            // ioapic redirection table entry, so we can simply do a 64-bit copy.
            bits: item.get(0, 64),
        }
    }
}

impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY:
        // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys
        // table entry layout.
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}

impl From<&IoapicState> for kvm_ioapic_state {
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}

impl From<&LapicState> for kvm_lapic_state {
    fn from(item: &LapicState) -> Self {
        let mut state = kvm_lapic_state::default();
        // There are 64 lapic registers.
        for (reg, value) in item.regs.iter().enumerate() {
            // Each lapic register is 16 bytes, but only the first 4 are used.
            let reg_offset = 16 * reg;
            let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];

            // to_le_bytes() produces an array of u8, not i8 (c_char), so we can't directly use
            // copy_from_slice().
            for (i, v) in value.to_le_bytes().iter().enumerate() {
                regs_slice[i] = *v as i8;
            }
        }
        state
    }
}

impl From<&kvm_lapic_state> for LapicState {
    fn from(item: &kvm_lapic_state) -> Self {
        let mut state = LapicState { regs: [0; 64] };
        // There are 64 lapic registers.
        for reg in 0..64 {
            // Each lapic register is 16 bytes, but only the first 4 are used.
            let reg_offset = 16 * reg;

            // from_le_bytes() only works on arrays of u8, not i8 (c_char).
            let reg_slice = &item.regs[reg_offset..reg_offset + 4];
            let mut bytes = [0u8; 4];
            for i in 0..4 {
                bytes[i] = reg_slice[i] as u8;
            }
            state.regs[reg] = u32::from_le_bytes(bytes);
        }
        state
    }
}

impl From<&PitState> for kvm_pit_state2 {
    fn from(item: &PitState) -> Self {
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}

impl From<&kvm_pit_state2> for PitState {
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}

impl From<&PitChannelState> for kvm_pit_channel_state {
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // kvm's write_latch only stores the low byte of the reload value.
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            count_load_time: item.count_load_time as i64,
        }
    }
}

impl From<&kvm_pit_channel_state> for PitChannelState {
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // kvm's write_latch only stores the low byte of the reload value.
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            count_load_time: item.count_load_time as u64,
        }
    }
}

// This function translates an IrqSourceChip to the KVM u32 equivalent. It has a different
// implementation between x86_64 and aarch64 because the irqchip KVM constants are not defined on
// all architectures.
pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
    match chip {
        IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
        IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
        IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
        _ => {
            error!("Invalid IrqSourceChip for X86 {:?}", chip);
            0
        }
    }
}

impl From<&kvm_regs> for Regs {
    fn from(r: &kvm_regs) -> Self {
        Regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}

impl From<&Regs> for kvm_regs {
    fn from(r: &Regs) -> Self {
        kvm_regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}

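// Each optional field corresponds to a KVM_VCPUEVENT_VALID_* flag: a `Some` value sets the
// flag so the kernel consumes the field, while `None` leaves the flag clear so the kernel
// keeps its current state for that field.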
impl From<&VcpuEvents> for kvm_vcpu_events {
    fn from(ve: &VcpuEvents) -> Self {
        let mut kvm_ve: kvm_vcpu_events = Default::default();

        kvm_ve.exception.injected = ve.exception.injected as u8;
        kvm_ve.exception.nr = ve.exception.nr;
        kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
        if let Some(pending) = ve.exception.pending {
            kvm_ve.exception.pending = pending as u8;
            if ve.exception_payload.is_some() {
                kvm_ve.exception_has_payload = true as u8;
            }
            kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        }
        kvm_ve.exception.error_code = ve.exception.error_code;

        kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
        kvm_ve.interrupt.nr = ve.interrupt.nr;
        kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
        if let Some(shadow) = ve.interrupt.shadow {
            kvm_ve.interrupt.shadow = shadow;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
        }

        kvm_ve.nmi.injected = ve.nmi.injected as u8;
        if let Some(pending) = ve.nmi.pending {
            kvm_ve.nmi.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        }
        kvm_ve.nmi.masked = ve.nmi.masked as u8;

        if let Some(sipi_vector) = ve.sipi_vector {
            kvm_ve.sipi_vector = sipi_vector;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }

        if let Some(smm) = ve.smi.smm {
            kvm_ve.smi.smm = smm as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
        kvm_ve.smi.pending = ve.smi.pending as u8;
        kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
        kvm_ve.smi.latched_init = ve.smi.latched_init;

        if let Some(pending) = ve.triple_fault.pending {
            kvm_ve.triple_fault.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        }
        kvm_ve
    }
}

impl From<&kvm_vcpu_events> for VcpuEvents {
    fn from(ve: &kvm_vcpu_events) -> Self {
        let exception = VcpuExceptionState {
            injected: ve.exception.injected != 0,
            nr: ve.exception.nr,
            has_error_code: ve.exception.has_error_code != 0,
            pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
                Some(ve.exception.pending != 0)
            } else {
                None
            },
            error_code: ve.exception.error_code,
        };

        let interrupt = VcpuInterruptState {
            injected: ve.interrupt.injected != 0,
            nr: ve.interrupt.nr,
            soft: ve.interrupt.soft != 0,
            shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
                Some(ve.interrupt.shadow)
            } else {
                None
            },
        };

        let nmi = VcpuNmiState {
            injected: ve.nmi.injected != 0,
            pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
                Some(ve.nmi.pending != 0)
            } else {
                None
            },
            masked: ve.nmi.masked != 0,
        };

        let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
            Some(ve.sipi_vector)
        } else {
            None
        };

        let smi = VcpuSmiState {
            smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
                Some(ve.smi.smm != 0)
            } else {
                None
            },
            pending: ve.smi.pending != 0,
            smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
            latched_init: ve.smi.latched_init,
        };

        let triple_fault = VcpuTripleFaultState {
            pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
                Some(ve.triple_fault.pending != 0)
            } else {
                None
            },
        };

        let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
            Some(ve.exception_payload)
        } else {
            None
        };

        VcpuEvents {
            exception,
            interrupt,
            nmi,
            sipi_vector,
            smi,
            triple_fault,
            exception_payload,
        }
    }
}

impl From<&kvm_segment> for Segment {
    fn from(s: &kvm_segment) -> Self {
        Segment {
            base: s.base,
            limit_bytes: s.limit,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
        }
    }
}

impl From<&Segment> for kvm_segment {
    fn from(s: &Segment) -> Self {
        kvm_segment {
            base: s.base,
            limit: s.limit_bytes,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
            unusable: match s.present {
                0 => 1,
                _ => 0,
            },
            ..Default::default()
        }
    }
}

impl From<&kvm_dtable> for DescriptorTable {
    fn from(dt: &kvm_dtable) -> Self {
        DescriptorTable {
            base: dt.base,
            limit: dt.limit,
        }
    }
}

impl From<&DescriptorTable> for kvm_dtable {
    fn from(dt: &DescriptorTable) -> Self {
        kvm_dtable {
            base: dt.base,
            limit: dt.limit,
            ..Default::default()
        }
    }
}

impl From<&kvm_sregs> for Sregs {
    fn from(r: &kvm_sregs) -> Self {
        Sregs {
            cs: Segment::from(&r.cs),
            ds: Segment::from(&r.ds),
            es: Segment::from(&r.es),
            fs: Segment::from(&r.fs),
            gs: Segment::from(&r.gs),
            ss: Segment::from(&r.ss),
            tr: Segment::from(&r.tr),
            ldt: Segment::from(&r.ldt),
            gdt: DescriptorTable::from(&r.gdt),
            idt: DescriptorTable::from(&r.idt),
            cr0: r.cr0,
            cr2: r.cr2,
            cr3: r.cr3,
            cr4: r.cr4,
            cr8: r.cr8,
            efer: r.efer,
        }
    }
}

impl From<&kvm_fpu> for Fpu {
    fn from(r: &kvm_fpu) -> Self {
        Fpu {
            fpr: FpuReg::from_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
        }
    }
}

impl From<&Fpu> for kvm_fpu {
    fn from(r: &Fpu) -> Self {
        kvm_fpu {
            fpr: FpuReg::to_16byte_arrays(&r.fpr),
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
            ..Default::default()
        }
    }
}

impl From<&kvm_debugregs> for DebugRegs {
    fn from(r: &kvm_debugregs) -> Self {
        DebugRegs {
            db: r.db,
            dr6: r.dr6,
            dr7: r.dr7,
        }
    }
}

impl From<&DebugRegs> for kvm_debugregs {
    fn from(r: &DebugRegs) -> Self {
        kvm_debugregs {
            db: r.db,
            dr6: r.dr6,
            dr7: r.dr7,
            ..Default::default()
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn vcpu_event_to_from() {
        // All data is random.
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert_eq!(ve.exception.injected, true);
        assert_eq!(ve.exception.nr, 65);
        assert_eq!(ve.exception.has_error_code, true);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending.unwrap(), true);

        assert_eq!(ve.interrupt.injected, true);
        assert_eq!(ve.interrupt.nr, 100);
        assert_eq!(ve.interrupt.soft, true);
        assert_eq!(ve.interrupt.shadow.unwrap(), 114);

        assert_eq!(ve.nmi.injected, true);
        assert_eq!(ve.nmi.pending.unwrap(), true);
        assert_eq!(ve.nmi.masked, false);

        assert_eq!(ve.sipi_vector.unwrap(), 105);

        assert_eq!(ve.smi.smm.unwrap(), true);
        assert_eq!(ve.smi.pending, true);
        assert_eq!(ve.smi.smm_inside_nmi, true);
        assert_eq!(ve.smi.latched_init, 100);

        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload.unwrap(), 33);

        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}