1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use std::arch::x86_64::CpuidResult;
6 use std::collections::BTreeMap;
7
8 use base::errno_result;
9 use base::error;
10 use base::ioctl;
11 use base::ioctl_with_mut_ptr;
12 use base::ioctl_with_mut_ref;
13 use base::ioctl_with_ptr;
14 use base::ioctl_with_ref;
15 use base::ioctl_with_val;
16 use base::AsRawDescriptor;
17 use base::Error;
18 use base::IoctlNr;
19 use base::MappedRegion;
20 use base::Result;
21 use data_model::vec_with_array_field;
22 use kvm_sys::*;
23 use libc::E2BIG;
24 use libc::EIO;
25 use libc::ENXIO;
26 use serde::Deserialize;
27 use serde::Serialize;
28 use vm_memory::GuestAddress;
29
30 use super::Config;
31 use super::Kvm;
32 use super::KvmVcpu;
33 use super::KvmVm;
34 use crate::host_phys_addr_bits;
35 use crate::ClockState;
36 use crate::CpuId;
37 use crate::CpuIdEntry;
38 use crate::DebugRegs;
39 use crate::DescriptorTable;
40 use crate::DeviceKind;
41 use crate::Fpu;
42 use crate::HypervisorX86_64;
43 use crate::IoapicRedirectionTableEntry;
44 use crate::IoapicState;
45 use crate::IrqSourceChip;
46 use crate::LapicState;
47 use crate::PicSelect;
48 use crate::PicState;
49 use crate::PitChannelState;
50 use crate::PitState;
51 use crate::ProtectionType;
52 use crate::Regs;
53 use crate::Segment;
54 use crate::Sregs;
55 use crate::VcpuExit;
56 use crate::VcpuX86_64;
57 use crate::VmCap;
58 use crate::VmX86_64;
59 use crate::Xsave;
60 use crate::NUM_IOAPIC_PINS;
61
62 type KvmCpuId = kvm::CpuId;
63 const KVM_XSAVE_MAX_SIZE: usize = 4096;
64 const MSR_IA32_APICBASE: u32 = 0x0000001b;
65
/// Serializable snapshot of a vCPU's pending interrupt/exception/NMI/SMI state.
///
/// This is the userspace mirror of KVM's `kvm_vcpu_events` (converted via `From` in
/// `get_interrupt_state`/`set_interrupt_state` below) so it can round-trip through
/// `serde_json` for snapshot/restore. The `Option` fields presumably carry data that is
/// only valid when the corresponding KVM flag/capability is present — confirm against the
/// `From<&kvm_vcpu_events>` conversion.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuEvents {
    pub exception: VcpuExceptionState,
    pub interrupt: VcpuInterruptState,
    pub nmi: VcpuNmiState,
    pub sipi_vector: Option<u32>,
    pub smi: VcpuSmiState,
    pub triple_fault: VcpuTripleFaultState,
    pub exception_payload: Option<u64>,
}
76
/// Pending/injected exception portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuExceptionState {
    pub injected: bool,
    // Exception vector number.
    pub nr: u8,
    pub has_error_code: bool,
    // NOTE(review): presumably only `Some` when KVM reports the pending flag — confirm
    // against the kvm_vcpu_events conversion.
    pub pending: Option<bool>,
    pub error_code: u32,
}
85
/// Pending/injected external-interrupt portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuInterruptState {
    pub injected: bool,
    // Interrupt vector number.
    pub nr: u8,
    // True for software-injected interrupts.
    pub soft: bool,
    // NOTE(review): presumably the interrupt-shadow state, valid only when KVM reports
    // the shadow flag — confirm against the kvm_vcpu_events conversion.
    pub shadow: Option<u8>,
}
93
/// NMI portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuNmiState {
    pub injected: bool,
    // NOTE(review): presumably only `Some` when KVM reports NMI-pending — confirm.
    pub pending: Option<bool>,
    pub masked: bool,
}
100
/// SMI (system management interrupt) portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuSmiState {
    // NOTE(review): presumably whether the vCPU is in SMM, valid only with the SMM
    // capability — confirm against the kvm_vcpu_events conversion.
    pub smm: Option<bool>,
    pub pending: bool,
    pub smm_inside_nmi: bool,
    pub latched_init: u8,
}
108
/// Triple-fault portion of [`VcpuEvents`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct VcpuTripleFaultState {
    // NOTE(review): presumably only `Some` when the triple-fault-event capability is
    // enabled — confirm against the kvm_vcpu_events conversion.
    pub pending: Option<bool>,
}
113
get_cpuid_with_initial_capacity<T: AsRawDescriptor>( descriptor: &T, kind: IoctlNr, initial_capacity: usize, ) -> Result<CpuId>114 pub fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
115 descriptor: &T,
116 kind: IoctlNr,
117 initial_capacity: usize,
118 ) -> Result<CpuId> {
119 let mut entries: usize = initial_capacity;
120
121 loop {
122 let mut kvm_cpuid = KvmCpuId::new(entries);
123
124 let ret = {
125 // SAFETY:
126 // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
127 // memory allocated for the struct. The limit is read from nent within KvmCpuId,
128 // which is set to the allocated size above.
129 unsafe { ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr()) }
130 };
131 if ret < 0 {
132 let err = Error::last();
133 match err.errno() {
134 E2BIG => {
135 // double the available memory for cpuid entries for kvm.
136 if let Some(val) = entries.checked_mul(2) {
137 entries = val;
138 } else {
139 return Err(err);
140 }
141 }
142 _ => return Err(err),
143 }
144 } else {
145 return Ok(CpuId::from(&kvm_cpuid));
146 }
147 }
148 }
149
150 impl Kvm {
get_cpuid(&self, kind: IoctlNr) -> Result<CpuId>151 pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
152 const KVM_MAX_ENTRIES: usize = 256;
153 get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
154 }
155
156 // The x86 machine type is always 0. Protected VMs are not supported.
get_vm_type(&self, protection_type: ProtectionType) -> Result<u32>157 pub fn get_vm_type(&self, protection_type: ProtectionType) -> Result<u32> {
158 if protection_type == ProtectionType::Unprotected {
159 Ok(0)
160 } else {
161 error!("Protected mode is not supported on x86_64.");
162 Err(Error::new(libc::EINVAL))
163 }
164 }
165
166 /// Get the size of guest physical addresses in bits.
get_guest_phys_addr_bits(&self) -> u8167 pub fn get_guest_phys_addr_bits(&self) -> u8 {
168 // Assume the guest physical address size is the same as the host.
169 host_phys_addr_bits()
170 }
171 }
172
impl HypervisorX86_64 for Kvm {
    /// Returns the cpuid leaves KVM supports natively (KVM_GET_SUPPORTED_CPUID).
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID())
    }

    /// Returns the cpuid leaves KVM can emulate in software (KVM_GET_EMULATED_CPUID).
    fn get_emulated_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_EMULATED_CPUID())
    }

    /// Returns the list of MSR indices supported by KVM (KVM_GET_MSR_INDEX_LIST).
    ///
    /// `kvm_msr_list` ends in a flexible array member, so storage for up to
    /// `MAX_KVM_MSR_ENTRIES` trailing `u32` indices is allocated with
    /// `vec_with_array_field` and `nmsrs` tells the kernel how many slots exist.
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = {
            // SAFETY:
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nmsrs, which is set to the allocated
            // size (MAX_KVM_MSR_ENTRIES) above.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // The kernel updates nmsrs to the number of entries it actually wrote.
        let mut nmsrs = msr_list[0].nmsrs;

        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        let indices: &[u32] = unsafe {
            // Defensive clamp: never trust the kernel-reported count beyond what we allocated.
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}
214
impl KvmVm {
    /// Does platform specific initialization for the KvmVm.
    ///
    /// x86_64 currently needs no per-arch setup beyond the common KvmVm construction.
    pub fn init_arch(&self, _cfg: &Config) -> Result<()> {
        Ok(())
    }

    /// Whether running under pKVM.
    ///
    /// Always false here: protected VMs are not supported on x86_64 (see
    /// `Kvm::get_vm_type`).
    pub fn is_pkvm(&self) -> bool {
        false
    }

    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            // KVM on x86_64 always provides a paravirtual clock (see
            // get_pvclock_arch/set_pvclock_arch below).
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// Returns the params to pass to KVM_CREATE_DEVICE for a `kind` device on this arch, or None to
    /// let the arch-independent `KvmVm::create_device` handle it.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`.
    ///
    /// Reads the VM-wide kvmclock via KVM_GET_CLOCK.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        let mut clock_data: kvm_clock_data = Default::default();
        let ret =
            // SAFETY:
            // Safe because we know that our file is a VM fd, we know the kernel will only write correct
            // amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(&clock_data))
        } else {
            errno_result()
        }
    }

    /// Arch-specific implementation of `Vm::set_pvclock`.
    ///
    /// Writes the VM-wide kvmclock via KVM_SET_CLOCK.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(state);
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read correct
        // amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of given interrupt controller by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        // chip_id selects which irqchip KVM reads (primary/secondary PIC here).
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not IOAPIC.
                unsafe { irqchip_state.chip.pic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of given interrupt controller by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        // `chip` is a union; writing the `pic` member matches the chip_id chosen above.
        irqchip_state.chip.pic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the number of pins for emulated IO-APIC.
    pub fn get_ioapic_num_pins(&self) -> Result<usize> {
        Ok(NUM_IOAPIC_PINS)
    }

    /// Retrieves the state of IOAPIC by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        // chip_id 2 selects the IOAPIC (0/1 are the two PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        let ret = {
            // SAFETY:
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            unsafe { ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state) }
        };
        if ret == 0 {
            Ok(
                // SAFETY:
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not PIC.
                unsafe { irqchip_state.chip.ioapic },
            )
        } else {
            errno_result()
        }
    }

    /// Sets the state of IOAPIC by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        // chip_id 2 selects the IOAPIC (0/1 are the two PICs).
        let mut irqchip_state = kvm_irqchip {
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn create_pit(&self) -> Result<()> {
        // Default config: no special PIT flags requested.
        let pit_config = kvm_pit_config::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of PIT by issuing KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        let mut pit_state = Default::default();
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // correct amount of memory to our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of PIT by issuing KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // SAFETY:
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Enable userspace msr.
    ///
    /// Enables KVM_CAP_X86_USER_SPACE_MSR so that the configured classes of MSR accesses
    /// exit to userspace instead of being handled (or faulted) in the kernel.
    pub fn enable_userspace_msr(&self) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_X86_USER_SPACE_MSR,
            ..Default::default()
        };
        // Exit to userspace for MSRs KVM doesn't know, invalid accesses, and filtered MSRs.
        cap.args[0] = (KVM_MSR_EXIT_REASON_UNKNOWN
            | KVM_MSR_EXIT_REASON_INVAL
            | KVM_MSR_EXIT_REASON_FILTER) as u64;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know that the
        // kernel will only read correct amount of memory from our pointer, and
        // we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Set MSR_PLATFORM_INFO read access.
    pub fn set_platform_info_read_access(&self, allow_read: bool) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_MSR_PLATFORM_INFO,
            ..Default::default()
        };
        cap.args[0] = allow_read as u64;

        // SAFETY:
        // Safe because we know that our file is a VM fd, we know that the
        // kernel will only read correct amount of memory from our pointer, and
        // we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Set msr filter.
    ///
    /// `msr_list` is `(read_msrs, write_msrs)`: the MSR indices whose reads/writes should
    /// be filtered. The filter is installed with KVM_X86_SET_MSR_FILTER using a
    /// default-allow policy and one bitmap per direction; bits are *cleared* for the
    /// listed MSRs so those accesses are filtered (per the KVM API, a 0 bit denies the
    /// access, which with enable_userspace_msr results in a userspace MSR exit).
    pub fn set_msr_filter(&self, msr_list: (Vec<u32>, Vec<u32>)) -> Result<()> {
        let mut rd_nmsrs: u32 = 0;
        let mut wr_nmsrs: u32 = 0;
        // All-ones bitmaps = allow everything by default. These arrays must stay alive
        // until after the ioctl below, since kvm_msr_filter stores raw pointers to them.
        let mut rd_msr_bitmap: [u8; KVM_MSR_FILTER_RANGE_MAX_BYTES] =
            [0xff; KVM_MSR_FILTER_RANGE_MAX_BYTES];
        let mut wr_msr_bitmap: [u8; KVM_MSR_FILTER_RANGE_MAX_BYTES] =
            [0xff; KVM_MSR_FILTER_RANGE_MAX_BYTES];
        let (rd_msrs, wr_msrs) = msr_list;

        for index in rd_msrs {
            // currently we only consider the MSR lower than
            // KVM_MSR_FILTER_RANGE_MAX_BITS
            if index >= (KVM_MSR_FILTER_RANGE_MAX_BITS as u32) {
                continue;
            }
            rd_nmsrs += 1;
            // Clear the MSR's bit => filter reads of this MSR.
            rd_msr_bitmap[(index / 8) as usize] &= !(1 << (index & 0x7));
        }
        for index in wr_msrs {
            // currently we only consider the MSR lower than
            // KVM_MSR_FILTER_RANGE_MAX_BITS
            if index >= (KVM_MSR_FILTER_RANGE_MAX_BITS as u32) {
                continue;
            }
            wr_nmsrs += 1;
            // Clear the MSR's bit => filter writes of this MSR.
            wr_msr_bitmap[(index / 8) as usize] &= !(1 << (index & 0x7));
        }

        let mut msr_filter = kvm_msr_filter {
            flags: KVM_MSR_FILTER_DEFAULT_ALLOW,
            ..Default::default()
        };

        // Populate one filter range per direction, only if that direction has entries.
        let mut count = 0;
        if rd_nmsrs > 0 {
            msr_filter.ranges[count].flags = KVM_MSR_FILTER_READ;
            msr_filter.ranges[count].nmsrs = KVM_MSR_FILTER_RANGE_MAX_BITS as u32;
            msr_filter.ranges[count].base = 0x0;
            msr_filter.ranges[count].bitmap = rd_msr_bitmap.as_mut_ptr();
            count += 1;
        }
        if wr_nmsrs > 0 {
            msr_filter.ranges[count].flags = KVM_MSR_FILTER_WRITE;
            msr_filter.ranges[count].nmsrs = KVM_MSR_FILTER_RANGE_MAX_BITS as u32;
            msr_filter.ranges[count].base = 0x0;
            msr_filter.ranges[count].bitmap = wr_msr_bitmap.as_mut_ptr();
            count += 1;
        }

        // No ranges => nothing to install; treated as success (ret stays 0).
        let mut ret = 0;
        if count > 0 {
            // SAFETY:
            // Safe because we know that our file is a VM fd, we know that the
            // kernel will only read correct amount of memory from our pointer, and
            // we verify the return result.
            ret = unsafe { ioctl_with_ref(self, KVM_X86_SET_MSR_FILTER(), &msr_filter) };
        }

        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// Enable support for split-irqchip.
    ///
    /// With KVM_CAP_SPLIT_IRQCHIP the PIC/IOAPIC are emulated in userspace while the
    /// local APICs stay in the kernel; `ioapic_pins` tells KVM how many IOAPIC routes
    /// to reserve.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        cap.args[0] = ioapic_pins as u64;
        // SAFETY:
        // safe becuase we allocated the struct and we know the kernel will read
        // exactly the size of the struct
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }
}
540
541 impl VmX86_64 for KvmVm {
get_hypervisor(&self) -> &dyn HypervisorX86_64542 fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
543 &self.kvm
544 }
545
create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>>546 fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
547 // create_vcpu is declared separately in VmAArch64 and VmX86, so it can return VcpuAArch64
548 // or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
549 Ok(Box::new(KvmVm::create_kvm_vcpu(self, id)?))
550 }
551
552 /// Sets the address of the three-page region in the VM's address space.
553 ///
554 /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
set_tss_addr(&self, addr: GuestAddress) -> Result<()>555 fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
556 // SAFETY:
557 // Safe because we know that our file is a VM fd and we verify the return result.
558 let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr.offset()) };
559 if ret == 0 {
560 Ok(())
561 } else {
562 errno_result()
563 }
564 }
565
566 /// Sets the address of a one-page region in the VM's address space.
567 ///
568 /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
set_identity_map_addr(&self, addr: GuestAddress) -> Result<()>569 fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
570 // SAFETY:
571 // Safe because we know that our file is a VM fd and we verify the return result.
572 let ret = unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &addr.offset()) };
573 if ret == 0 {
574 Ok(())
575 } else {
576 errno_result()
577 }
578 }
579 }
580
581 impl KvmVcpu {
582 /// Handles a `KVM_EXIT_SYSTEM_EVENT` with event type `KVM_SYSTEM_EVENT_RESET` with the given
583 /// event flags and returns the appropriate `VcpuExit` value for the run loop to handle.
system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit>584 pub fn system_event_reset(&self, _event_flags: u64) -> Result<VcpuExit> {
585 Ok(VcpuExit::SystemEventReset)
586 }
587
588 /// Gets the Xsave size by checking the extension KVM_CAP_XSAVE2.
589 ///
590 /// Size should always be >=0. If size is negative, an error occurred.
591 /// If size <= 4096, XSAVE2 is not supported by the CPU or the kernel. KVM_XSAVE_MAX_SIZE is
592 /// returned (4096).
593 /// Otherwise, the size will be returned.
xsave_size(&self) -> Result<usize>594 fn xsave_size(&self) -> Result<usize> {
595 let size = {
596 // SAFETY:
597 // Safe because we know that our file is a valid VM fd
598 unsafe { ioctl_with_val(&self.vm, KVM_CHECK_EXTENSION(), KVM_CAP_XSAVE2 as u64) }
599 };
600 if size < 0 {
601 return errno_result();
602 }
603 // Safe to unwrap since we already tested for negative values
604 let size: usize = size.try_into().unwrap();
605 Ok(size.max(KVM_XSAVE_MAX_SIZE))
606 }
607 }
608
609 impl VcpuX86_64 for KvmVcpu {
610 #[allow(clippy::cast_ptr_alignment)]
set_interrupt_window_requested(&self, requested: bool)611 fn set_interrupt_window_requested(&self, requested: bool) {
612 // SAFETY:
613 // Safe because we know we mapped enough memory to hold the kvm_run struct because the
614 // kernel told us how large it was. The pointer is page aligned so casting to a different
615 // type is well defined, hence the clippy allow attribute.
616 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
617 run.request_interrupt_window = requested.into();
618 }
619
620 #[allow(clippy::cast_ptr_alignment)]
ready_for_interrupt(&self) -> bool621 fn ready_for_interrupt(&self) -> bool {
622 // SAFETY:
623 // Safe because we know we mapped enough memory to hold the kvm_run struct because the
624 // kernel told us how large it was. The pointer is page aligned so casting to a different
625 // type is well defined, hence the clippy allow attribute.
626 let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
627 run.ready_for_interrupt_injection != 0 && run.if_flag != 0
628 }
629
630 /// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
631 ///
632 /// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
633 /// ChromeOS doesn't support PPC or MIPS.
interrupt(&self, irq: u32) -> Result<()>634 fn interrupt(&self, irq: u32) -> Result<()> {
635 let interrupt = kvm_interrupt { irq };
636 // SAFETY:
637 // safe becuase we allocated the struct and we know the kernel will read
638 // exactly the size of the struct
639 let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT(), &interrupt) };
640 if ret == 0 {
641 Ok(())
642 } else {
643 errno_result()
644 }
645 }
646
inject_nmi(&self) -> Result<()>647 fn inject_nmi(&self) -> Result<()> {
648 // SAFETY:
649 // Safe because we know that our file is a VCPU fd.
650 let ret = unsafe { ioctl(self, KVM_NMI()) };
651 if ret == 0 {
652 Ok(())
653 } else {
654 errno_result()
655 }
656 }
657
get_regs(&self) -> Result<Regs>658 fn get_regs(&self) -> Result<Regs> {
659 let mut regs: kvm_regs = Default::default();
660 let ret = {
661 // SAFETY:
662 // Safe because we know that our file is a VCPU fd, we know the kernel will only read
663 // the correct amount of memory from our pointer, and we verify the return
664 // result.
665 unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) }
666 };
667 if ret == 0 {
668 Ok(Regs::from(®s))
669 } else {
670 errno_result()
671 }
672 }
673
set_regs(&self, regs: &Regs) -> Result<()>674 fn set_regs(&self, regs: &Regs) -> Result<()> {
675 let regs = kvm_regs::from(regs);
676 let ret = {
677 // SAFETY:
678 // Safe because we know that our file is a VCPU fd, we know the kernel will only read
679 // the correct amount of memory from our pointer, and we verify the return
680 // result.
681 unsafe { ioctl_with_ref(self, KVM_SET_REGS(), ®s) }
682 };
683 if ret == 0 {
684 Ok(())
685 } else {
686 errno_result()
687 }
688 }
689
get_sregs(&self) -> Result<Sregs>690 fn get_sregs(&self) -> Result<Sregs> {
691 let mut regs: kvm_sregs = Default::default();
692 let ret = {
693 // SAFETY:
694 // Safe because we know that our file is a VCPU fd, we know the kernel will only write
695 // the correct amount of memory to our pointer, and we verify the return
696 // result.
697 unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) }
698 };
699 if ret == 0 {
700 Ok(Sregs::from(®s))
701 } else {
702 errno_result()
703 }
704 }
705
set_sregs(&self, sregs: &Sregs) -> Result<()>706 fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
707 // Get the current `kvm_sregs` so we can use its `apic_base` and `interrupt_bitmap`, which
708 // are not present in `Sregs`.
709 let mut kvm_sregs: kvm_sregs = Default::default();
710 // SAFETY:
711 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
712 // correct amount of memory to our pointer, and we verify the return result.
713 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut kvm_sregs) };
714 if ret != 0 {
715 return errno_result();
716 }
717
718 kvm_sregs.cs = kvm_segment::from(&sregs.cs);
719 kvm_sregs.ds = kvm_segment::from(&sregs.ds);
720 kvm_sregs.es = kvm_segment::from(&sregs.es);
721 kvm_sregs.fs = kvm_segment::from(&sregs.fs);
722 kvm_sregs.gs = kvm_segment::from(&sregs.gs);
723 kvm_sregs.ss = kvm_segment::from(&sregs.ss);
724 kvm_sregs.tr = kvm_segment::from(&sregs.tr);
725 kvm_sregs.ldt = kvm_segment::from(&sregs.ldt);
726 kvm_sregs.gdt = kvm_dtable::from(&sregs.gdt);
727 kvm_sregs.idt = kvm_dtable::from(&sregs.idt);
728 kvm_sregs.cr0 = sregs.cr0;
729 kvm_sregs.cr2 = sregs.cr2;
730 kvm_sregs.cr3 = sregs.cr3;
731 kvm_sregs.cr4 = sregs.cr4;
732 kvm_sregs.cr8 = sregs.cr8;
733 kvm_sregs.efer = sregs.efer;
734
735 // SAFETY:
736 // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
737 // correct amount of memory from our pointer, and we verify the return result.
738 let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), &kvm_sregs) };
739 if ret == 0 {
740 Ok(())
741 } else {
742 errno_result()
743 }
744 }
745
get_fpu(&self) -> Result<Fpu>746 fn get_fpu(&self) -> Result<Fpu> {
747 let mut fpu: kvm_fpu = Default::default();
748 // SAFETY:
749 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
750 // correct amount of memory to our pointer, and we verify the return result.
751 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut fpu) };
752 if ret == 0 {
753 Ok(Fpu::from(&fpu))
754 } else {
755 errno_result()
756 }
757 }
758
set_fpu(&self, fpu: &Fpu) -> Result<()>759 fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
760 let fpu = kvm_fpu::from(fpu);
761 let ret = {
762 // SAFETY:
763 // Here we trust the kernel not to read past the end of the kvm_fpu struct.
764 unsafe { ioctl_with_ref(self, KVM_SET_FPU(), &fpu) }
765 };
766 if ret == 0 {
767 Ok(())
768 } else {
769 errno_result()
770 }
771 }
772
773 /// If the VM reports using XSave2, the function will call XSave2.
get_xsave(&self) -> Result<Xsave>774 fn get_xsave(&self) -> Result<Xsave> {
775 let size = self.xsave_size()?;
776 let ioctl_nr = if size > KVM_XSAVE_MAX_SIZE {
777 KVM_GET_XSAVE2()
778 } else {
779 KVM_GET_XSAVE()
780 };
781 let mut xsave = Xsave::new(size);
782
783 // SAFETY:
784 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
785 // correct amount of memory to our pointer, and we verify the return result.
786 let ret = unsafe { ioctl_with_mut_ptr(self, ioctl_nr, xsave.as_mut_ptr()) };
787 if ret == 0 {
788 Ok(xsave)
789 } else {
790 errno_result()
791 }
792 }
793
set_xsave(&self, xsave: &Xsave) -> Result<()>794 fn set_xsave(&self, xsave: &Xsave) -> Result<()> {
795 let size = self.xsave_size()?;
796 // Ensure xsave is the same size as used in get_xsave.
797 // Return err if sizes don't match => not the same extensions are enabled for CPU.
798 if xsave.len() != size {
799 return Err(Error::new(EIO));
800 }
801
802 // SAFETY:
803 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
804 // correct amount of memory to our pointer, and we verify the return result.
805 // Because of the len check above, and because the layout of `struct kvm_xsave` is
806 // compatible with a slice of `u32`, we can pass the pointer to `xsave` directly.
807 let ret = unsafe { ioctl_with_ptr(self, KVM_SET_XSAVE(), xsave.as_ptr()) };
808 if ret == 0 {
809 Ok(())
810 } else {
811 errno_result()
812 }
813 }
814
get_interrupt_state(&self) -> Result<serde_json::Value>815 fn get_interrupt_state(&self) -> Result<serde_json::Value> {
816 let mut vcpu_evts: kvm_vcpu_events = Default::default();
817 let ret = {
818 // SAFETY:
819 // Safe because we know that our file is a VCPU fd, we know the kernel will only write
820 // the correct amount of memory to our pointer, and we verify the return
821 // result.
822 unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut vcpu_evts) }
823 };
824 if ret == 0 {
825 Ok(
826 serde_json::to_value(VcpuEvents::from(&vcpu_evts)).map_err(|e| {
827 error!("failed to serialize vcpu_events: {:?}", e);
828 Error::new(EIO)
829 })?,
830 )
831 } else {
832 errno_result()
833 }
834 }
835
set_interrupt_state(&self, data: serde_json::Value) -> Result<()>836 fn set_interrupt_state(&self, data: serde_json::Value) -> Result<()> {
837 let vcpu_events =
838 kvm_vcpu_events::from(&serde_json::from_value::<VcpuEvents>(data).map_err(|e| {
839 error!("failed to deserialize vcpu_events: {:?}", e);
840 Error::new(EIO)
841 })?);
842 let ret = {
843 // SAFETY:
844 // Safe because we know that our file is a VCPU fd, we know the kernel will only read
845 // the correct amount of memory from our pointer, and we verify the return
846 // result.
847 unsafe { ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), &vcpu_events) }
848 };
849 if ret == 0 {
850 Ok(())
851 } else {
852 errno_result()
853 }
854 }
855
get_debugregs(&self) -> Result<DebugRegs>856 fn get_debugregs(&self) -> Result<DebugRegs> {
857 let mut regs: kvm_debugregs = Default::default();
858 // SAFETY:
859 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
860 // correct amount of memory to our pointer, and we verify the return result.
861 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) };
862 if ret == 0 {
863 Ok(DebugRegs::from(®s))
864 } else {
865 errno_result()
866 }
867 }
868
set_debugregs(&self, dregs: &DebugRegs) -> Result<()>869 fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
870 let dregs = kvm_debugregs::from(dregs);
871 let ret = {
872 // SAFETY:
873 // Here we trust the kernel not to read past the end of the kvm_debugregs struct.
874 unsafe { ioctl_with_ref(self, KVM_SET_DEBUGREGS(), &dregs) }
875 };
876 if ret == 0 {
877 Ok(())
878 } else {
879 errno_result()
880 }
881 }
882
get_xcrs(&self) -> Result<BTreeMap<u32, u64>>883 fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
884 let mut regs: kvm_xcrs = Default::default();
885 // SAFETY:
886 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
887 // correct amount of memory to our pointer, and we verify the return result.
888 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) };
889 if ret < 0 {
890 return errno_result();
891 }
892
893 Ok(regs
894 .xcrs
895 .iter()
896 .take(regs.nr_xcrs as usize)
897 .map(|kvm_xcr| (kvm_xcr.xcr, kvm_xcr.value))
898 .collect())
899 }
900
set_xcr(&self, xcr_index: u32, value: u64) -> Result<()>901 fn set_xcr(&self, xcr_index: u32, value: u64) -> Result<()> {
902 let mut kvm_xcr = kvm_xcrs {
903 nr_xcrs: 1,
904 ..Default::default()
905 };
906 kvm_xcr.xcrs[0].xcr = xcr_index;
907 kvm_xcr.xcrs[0].value = value;
908
909 let ret = {
910 // SAFETY:
911 // Here we trust the kernel not to read past the end of the kvm_xcrs struct.
912 unsafe { ioctl_with_ref(self, KVM_SET_XCRS(), &kvm_xcr) }
913 };
914 if ret == 0 {
915 Ok(())
916 } else {
917 errno_result()
918 }
919 }
920
get_msr(&self, msr_index: u32) -> Result<u64>921 fn get_msr(&self, msr_index: u32) -> Result<u64> {
922 let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
923 msrs[0].nmsrs = 1;
924
925 // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
926 unsafe {
927 let msr_entries = msrs[0].entries.as_mut_slice(1);
928 msr_entries[0].index = msr_index;
929 }
930
931 let ret = {
932 // SAFETY:
933 // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
934 unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0]) }
935 };
936 if ret < 0 {
937 return errno_result();
938 }
939
940 // KVM_GET_MSRS returns the number of msr entries written.
941 if ret != 1 {
942 return Err(base::Error::new(libc::ENOENT));
943 }
944
945 // SAFETY:
946 // Safe because we trust the kernel to return the correct array length on success.
947 let value = unsafe {
948 let msr_entries = msrs[0].entries.as_slice(1);
949 msr_entries[0].data
950 };
951
952 Ok(value)
953 }
954
    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        // Reads every MSR the host KVM reports via its MSR index list and returns the
        // values keyed by MSR index. Fails if the kernel returns fewer entries than asked.
        let msr_index_list = self.kvm.get_msr_index_list()?;
        let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_index_list.len());
        kvm_msrs[0].nmsrs = msr_index_list.len() as u32;
        // SAFETY:
        // Mapping the unsized array to a slice is unsafe because the length isn't known.
        // Providing the length used to create the struct guarantees the entire slice is valid.
        unsafe {
            kvm_msrs[0]
                .entries
                .as_mut_slice(msr_index_list.len())
                .iter_mut()
                .zip(msr_index_list.iter())
                .for_each(|(msr_entry, msr_index)| msr_entry.index = *msr_index);
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            unsafe { ioctl_with_ref(self, KVM_GET_MSRS(), &kvm_msrs[0]) }
        };
        if ret < 0 {
            return errno_result();
        }

        // KVM_GET_MSRS returns the number of msr entries written.
        // A short count would silently drop MSRs, so treat it as an error instead.
        let count = ret as usize;
        if count != msr_index_list.len() {
            error!(
                "failed to get all MSRs: requested {}, got {}",
                msr_index_list.len(),
                count,
            );
            return Err(base::Error::new(libc::EPERM));
        }

        // SAFETY:
        // Safe because we trust the kernel to return the correct array length on success.
        let msrs = unsafe {
            BTreeMap::from_iter(
                kvm_msrs[0]
                    .entries
                    .as_slice(count)
                    .iter()
                    .map(|kvm_msr| (kvm_msr.index, kvm_msr.data)),
            )
        };

        Ok(msrs)
    }
1005
set_msr(&self, msr_index: u32, value: u64) -> Result<()>1006 fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
1007 let mut kvm_msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(1);
1008 kvm_msrs[0].nmsrs = 1;
1009
1010 // SAFETY: We initialize a one-element array using `vec_with_array_field` above.
1011 unsafe {
1012 let msr_entries = kvm_msrs[0].entries.as_mut_slice(1);
1013 msr_entries[0].index = msr_index;
1014 msr_entries[0].data = value;
1015 }
1016
1017 let ret = {
1018 // SAFETY:
1019 // Here we trust the kernel not to read past the end of the kvm_msrs struct.
1020 unsafe { ioctl_with_ref(self, KVM_SET_MSRS(), &kvm_msrs[0]) }
1021 };
1022 if ret < 0 {
1023 return errno_result();
1024 }
1025
1026 // KVM_SET_MSRS returns the number of msr entries written.
1027 if ret != 1 {
1028 error!("failed to set MSR {:#x} to {:#x}", msr_index, value);
1029 return Err(base::Error::new(libc::EPERM));
1030 }
1031
1032 Ok(())
1033 }
1034
    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        // Installs the CPUID table the guest will see, via KVM_SET_CPUID2.
        let cpuid = KvmCpuId::from(cpuid);
        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the KvmCpuId buffer
            // pointed to by `cpuid.as_ptr()`.
            unsafe { ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr()) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
1048
    fn get_hyperv_cpuid(&self) -> Result<CpuId> {
        // Queries the Hyper-V enlightenment CPUID leaves via KVM_GET_SUPPORTED_HV_CPUID.
        // Starts with a 256-entry buffer; presumably get_cpuid_with_initial_capacity
        // retries with a larger buffer on E2BIG -- confirm against that helper.
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, KVM_GET_SUPPORTED_HV_CPUID(), KVM_MAX_ENTRIES)
    }
1053
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        // Programs hardware breakpoints (and optionally single-step) for guest debugging
        // via KVM_SET_GUEST_DEBUG. x86 has exactly four hardware breakpoint slots.
        use kvm_sys::*;
        let mut dbg: kvm_guest_debug = Default::default();

        if addrs.len() > 4 {
            error!(
                "Support 4 breakpoints at most but {} addresses are passed",
                addrs.len()
            );
            return Err(base::Error::new(libc::EINVAL));
        }

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }

        // Set bits 9 and 10.
        // bit 9: GE (global exact breakpoint enable) flag.
        // bit 10: always 1.
        dbg.arch.debugreg[7] = 0x0600;

        for (i, addr) in addrs.iter().enumerate() {
            // DR0-DR3 hold the breakpoint linear addresses.
            dbg.arch.debugreg[i] = addr.0;
            // Set global breakpoint enable flag (G0-G3 live at DR7 bits 1, 3, 5, 7).
            dbg.arch.debugreg[7] |= 2 << (i * 2);
        }

        let ret = {
            // SAFETY:
            // Here we trust the kernel not to read past the end of the kvm_guest_debug struct.
            unsafe { ioctl_with_ref(self, KVM_SET_GUEST_DEBUG(), &dbg) }
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
1093
    /// KVM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        // Always fails with ENXIO: KVM handles CPUID in-kernel, so userspace never
        // receives a CPUID exit to complete.
        Err(Error::new(ENXIO))
    }
1098
    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, _tsc_offset: u64) -> Result<()> {
        // On KVM, the TSC MSR is restored as part of SET_MSRS, and no further action is required.
        Ok(())
    }
1103 }
1104
1105 impl KvmVcpu {
1106 /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt
1107 /// Controller".
1108 ///
1109 /// See the documentation for KVM_GET_LAPIC.
get_lapic(&self) -> Result<kvm_lapic_state>1110 pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
1111 let mut klapic: kvm_lapic_state = Default::default();
1112
1113 let ret = {
1114 // SAFETY:
1115 // The ioctl is unsafe unless you trust the kernel not to write past the end of the
1116 // local_apic struct.
1117 unsafe { ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic) }
1118 };
1119 if ret < 0 {
1120 return errno_result();
1121 }
1122 Ok(klapic)
1123 }
1124
1125 /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt
1126 /// Controller".
1127 ///
1128 /// See the documentation for KVM_SET_LAPIC.
set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()>1129 pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
1130 let ret = {
1131 // SAFETY:
1132 // The ioctl is safe because the kernel will only read from the klapic struct.
1133 unsafe { ioctl_with_ref(self, KVM_SET_LAPIC(), klapic) }
1134 };
1135 if ret < 0 {
1136 return errno_result();
1137 }
1138 Ok(())
1139 }
1140
1141 /// X86 specific call to get the value of the APIC_BASE MSR.
1142 ///
1143 /// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
get_apic_base(&self) -> Result<u64>1144 pub fn get_apic_base(&self) -> Result<u64> {
1145 self.get_msr(MSR_IA32_APICBASE)
1146 }
1147
1148 /// X86 specific call to set the value of the APIC_BASE MSR.
1149 ///
1150 /// See the documentation for The kvm_run structure, and for KVM_GET_LAPIC.
set_apic_base(&self, apic_base: u64) -> Result<()>1151 pub fn set_apic_base(&self, apic_base: u64) -> Result<()> {
1152 self.set_msr(MSR_IA32_APICBASE, apic_base)
1153 }
1154
1155 /// Call to get pending interrupts acknowledged by the APIC but not yet injected into the CPU.
1156 ///
1157 /// See the documentation for KVM_GET_SREGS.
get_interrupt_bitmap(&self) -> Result<[u64; 4usize]>1158 pub fn get_interrupt_bitmap(&self) -> Result<[u64; 4usize]> {
1159 let mut regs: kvm_sregs = Default::default();
1160 // SAFETY:
1161 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
1162 // correct amount of memory to our pointer, and we verify the return result.
1163 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) };
1164 if ret >= 0 {
1165 Ok(regs.interrupt_bitmap)
1166 } else {
1167 errno_result()
1168 }
1169 }
1170
1171 /// Call to set pending interrupts acknowledged by the APIC but not yet injected into the CPU.
1172 ///
1173 /// See the documentation for KVM_GET_SREGS.
set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()>1174 pub fn set_interrupt_bitmap(&self, interrupt_bitmap: [u64; 4usize]) -> Result<()> {
1175 // Potentially racy code. Vcpu registers are set in a separate thread and this could result
1176 // in Sregs being modified from the Vcpu initialization thread and the Irq restoring
1177 // thread.
1178 let mut regs: kvm_sregs = Default::default();
1179 // SAFETY:
1180 // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
1181 // correct amount of memory to our pointer, and we verify the return result.
1182 let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) };
1183 if ret >= 0 {
1184 regs.interrupt_bitmap = interrupt_bitmap;
1185 // SAFETY:
1186 // Safe because we know that our file is a VCPU fd, we know the kernel will only read
1187 // the correct amount of memory from our pointer, and we verify the return
1188 // result.
1189 let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), ®s) };
1190 if ret >= 0 {
1191 Ok(())
1192 } else {
1193 errno_result()
1194 }
1195 } else {
1196 errno_result()
1197 }
1198 }
1199 }
1200
1201 impl<'a> From<&'a KvmCpuId> for CpuId {
from(kvm_cpuid: &'a KvmCpuId) -> CpuId1202 fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
1203 let kvm_entries = kvm_cpuid.entries_slice();
1204 let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
1205
1206 for entry in kvm_entries {
1207 let cpu_id_entry = CpuIdEntry {
1208 function: entry.function,
1209 index: entry.index,
1210 flags: entry.flags,
1211 cpuid: CpuidResult {
1212 eax: entry.eax,
1213 ebx: entry.ebx,
1214 ecx: entry.ecx,
1215 edx: entry.edx,
1216 },
1217 };
1218 cpu_id_entries.push(cpu_id_entry)
1219 }
1220 CpuId { cpu_id_entries }
1221 }
1222 }
1223
1224 impl From<&CpuId> for KvmCpuId {
from(cpuid: &CpuId) -> KvmCpuId1225 fn from(cpuid: &CpuId) -> KvmCpuId {
1226 let mut kvm = KvmCpuId::new(cpuid.cpu_id_entries.len());
1227 let entries = kvm.mut_entries_slice();
1228 for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
1229 entries[i] = kvm_cpuid_entry2 {
1230 function: e.function,
1231 index: e.index,
1232 flags: e.flags,
1233 eax: e.cpuid.eax,
1234 ebx: e.cpuid.ebx,
1235 ecx: e.cpuid.ecx,
1236 edx: e.cpuid.edx,
1237 ..Default::default()
1238 };
1239 }
1240 kvm
1241 }
1242 }
1243
1244 impl From<&ClockState> for kvm_clock_data {
from(state: &ClockState) -> Self1245 fn from(state: &ClockState) -> Self {
1246 kvm_clock_data {
1247 clock: state.clock,
1248 ..Default::default()
1249 }
1250 }
1251 }
1252
1253 impl From<&kvm_clock_data> for ClockState {
from(clock_data: &kvm_clock_data) -> Self1254 fn from(clock_data: &kvm_clock_data) -> Self {
1255 ClockState {
1256 clock: clock_data.clock,
1257 }
1258 }
1259 }
1260
impl From<&kvm_pic_state> for PicState {
    // Converts KVM's i8259 PIC snapshot into the hypervisor-agnostic `PicState`.
    // Fields the kernel ABI encodes as 0/1 `u8`s become real `bool`s here.
    fn from(item: &kvm_pic_state) -> Self {
        PicState {
            last_irr: item.last_irr,
            irr: item.irr,
            imr: item.imr,
            isr: item.isr,
            priority_add: item.priority_add,
            irq_base: item.irq_base,
            read_reg_select: item.read_reg_select != 0,
            poll: item.poll != 0,
            special_mask: item.special_mask != 0,
            init_state: item.init_state.into(),
            auto_eoi: item.auto_eoi != 0,
            rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
            special_fully_nested_mode: item.special_fully_nested_mode != 0,
            // KVM names the 4-byte-ICW flag `init4`.
            use_4_byte_icw: item.init4 != 0,
            elcr: item.elcr,
            elcr_mask: item.elcr_mask,
        }
    }
}
1283
1284 impl From<&PicState> for kvm_pic_state {
from(item: &PicState) -> Self1285 fn from(item: &PicState) -> Self {
1286 kvm_pic_state {
1287 last_irr: item.last_irr,
1288 irr: item.irr,
1289 imr: item.imr,
1290 isr: item.isr,
1291 priority_add: item.priority_add,
1292 irq_base: item.irq_base,
1293 read_reg_select: item.read_reg_select as u8,
1294 poll: item.poll as u8,
1295 special_mask: item.special_mask as u8,
1296 init_state: item.init_state as u8,
1297 auto_eoi: item.auto_eoi as u8,
1298 rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
1299 special_fully_nested_mode: item.special_fully_nested_mode as u8,
1300 init4: item.use_4_byte_icw as u8,
1301 elcr: item.elcr,
1302 elcr_mask: item.elcr_mask,
1303 }
1304 }
1305 }
1306
impl From<&kvm_ioapic_state> for IoapicState {
    // Converts KVM's IOAPIC snapshot into the hypervisor-agnostic form.
    // `ioregsel` is narrowed to u8 (the IOAPIC register-select is one byte wide).
    fn from(item: &kvm_ioapic_state) -> Self {
        let mut state = IoapicState {
            base_address: item.base_address,
            ioregsel: item.ioregsel as u8,
            ioapicid: item.id,
            current_interrupt_level_bitmap: item.irr,
            redirect_table: [IoapicRedirectionTableEntry::default(); NUM_IOAPIC_PINS],
        };
        // Copy each redirection-table entry; both tables have NUM_IOAPIC_PINS slots.
        for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1322
impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
    // Serializes a redirection-table entry into KVM's union representation.
    fn from(item: &IoapicRedirectionTableEntry) -> Self {
        kvm_ioapic_state__bindgen_ty_1 {
            // IoapicRedirectionTableEntry layout matches the exact bit layout of a hardware
            // ioapic redirection table entry, so we can simply do a 64-bit copy
            bits: item.get(0, 64),
        }
    }
}
1332
impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
    // Deserializes KVM's union representation back into a redirection-table entry.
    fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
        let mut entry = IoapicRedirectionTableEntry::default();
        // SAFETY:
        // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys
        // table entry layout
        entry.set(0, 64, unsafe { item.bits });
        entry
    }
}
1343
impl From<&IoapicState> for kvm_ioapic_state {
    // Converts the hypervisor-agnostic IOAPIC snapshot back to the KVM ABI layout.
    fn from(item: &IoapicState) -> Self {
        let mut state = kvm_ioapic_state {
            base_address: item.base_address,
            // Widen the one-byte register-select back to KVM's u32 field.
            ioregsel: item.ioregsel as u32,
            id: item.ioapicid,
            irr: item.current_interrupt_level_bitmap,
            ..Default::default()
        };
        // Copy each redirection-table entry; both tables have NUM_IOAPIC_PINS slots.
        for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
            *out_state = in_state.into();
        }
        state
    }
}
1359
impl From<&LapicState> for kvm_lapic_state {
    // Serializes the 64 u32 LAPIC registers into KVM's byte-array layout, where each
    // register occupies a 16-byte slot with only the first 4 bytes meaningful.
    fn from(item: &LapicState) -> Self {
        let mut state = kvm_lapic_state::default();
        // There are 64 lapic registers
        for (reg, value) in item.regs.iter().enumerate() {
            // Each lapic register is 16 bytes, but only the first 4 are used
            let reg_offset = 16 * reg;
            let regs_slice = &mut state.regs[reg_offset..reg_offset + 4];

            // to_le_bytes() produces an array of u8, not i8(c_char), so we can't directly use
            // copy_from_slice().
            for (i, v) in value.to_le_bytes().iter().enumerate() {
                regs_slice[i] = *v as i8;
            }
        }
        state
    }
}
1378
1379 impl From<&kvm_lapic_state> for LapicState {
from(item: &kvm_lapic_state) -> Self1380 fn from(item: &kvm_lapic_state) -> Self {
1381 let mut state = LapicState { regs: [0; 64] };
1382 // There are 64 lapic registers
1383 for reg in 0..64 {
1384 // Each lapic register is 16 bytes, but only the first 4 are used
1385 let reg_offset = 16 * reg;
1386
1387 // from_le_bytes() only works on arrays of u8, not i8(c_char).
1388 let reg_slice = &item.regs[reg_offset..reg_offset + 4];
1389 let mut bytes = [0u8; 4];
1390 for i in 0..4 {
1391 bytes[i] = reg_slice[i] as u8;
1392 }
1393 state.regs[reg] = u32::from_le_bytes(bytes);
1394 }
1395 state
1396 }
1397 }
1398
impl From<&PitState> for kvm_pit_state2 {
    // Converts the hypervisor-agnostic PIT snapshot (three channels + flags) into the
    // KVM ABI layout; reserved fields stay zeroed.
    fn from(item: &PitState) -> Self {
        kvm_pit_state2 {
            channels: [
                kvm_pit_channel_state::from(&item.channels[0]),
                kvm_pit_channel_state::from(&item.channels[1]),
                kvm_pit_channel_state::from(&item.channels[2]),
            ],
            flags: item.flags,
            ..Default::default()
        }
    }
}
1412
impl From<&kvm_pit_state2> for PitState {
    // Converts KVM's PIT snapshot (three channels + flags) into the
    // hypervisor-agnostic form.
    fn from(item: &kvm_pit_state2) -> Self {
        PitState {
            channels: [
                PitChannelState::from(&item.channels[0]),
                PitChannelState::from(&item.channels[1]),
                PitChannelState::from(&item.channels[2]),
            ],
            flags: item.flags,
        }
    }
}
1425
impl From<&PitChannelState> for kvm_pit_channel_state {
    // Converts one PIT channel to the KVM ABI layout; `bool` fields are widened to
    // the 0/1 `u8` encoding and the load time is reinterpreted as signed.
    fn from(item: &PitChannelState) -> Self {
        kvm_pit_channel_state {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched as u8,
            status_latched: item.status_latched as u8,
            status: item.status,
            read_state: item.read_state as u8,
            write_state: item.write_state as u8,
            // kvm's write_latch only stores the low byte of the reload value
            write_latch: item.reload_value as u8,
            rw_mode: item.rw_mode as u8,
            mode: item.mode,
            bcd: item.bcd as u8,
            gate: item.gate as u8,
            count_load_time: item.count_load_time as i64,
        }
    }
}
1446
impl From<&kvm_pit_channel_state> for PitChannelState {
    // Converts one KVM PIT channel to the hypervisor-agnostic form; 0/1 `u8` fields
    // become `bool`s and the load time is reinterpreted as unsigned.
    fn from(item: &kvm_pit_channel_state) -> Self {
        PitChannelState {
            count: item.count,
            latched_count: item.latched_count,
            count_latched: item.count_latched.into(),
            status_latched: item.status_latched != 0,
            status: item.status,
            read_state: item.read_state.into(),
            write_state: item.write_state.into(),
            // kvm's write_latch only stores the low byte of the reload value
            reload_value: item.write_latch as u16,
            rw_mode: item.rw_mode.into(),
            mode: item.mode,
            bcd: item.bcd != 0,
            gate: item.gate != 0,
            count_load_time: item.count_load_time as u64,
        }
    }
}
1467
1468 // This function translates an IrqSrouceChip to the kvm u32 equivalent. It has a different
1469 // implementation between x86_64 and aarch64 because the irqchip KVM constants are not defined on
1470 // all architectures.
chip_to_kvm_chip(chip: IrqSourceChip) -> u321471 pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
1472 match chip {
1473 IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
1474 IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
1475 IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
1476 _ => {
1477 error!("Invalid IrqChipSource for X86 {:?}", chip);
1478 0
1479 }
1480 }
1481 }
1482
impl From<&kvm_regs> for Regs {
    // Field-for-field copy of the general-purpose register file from the KVM layout.
    fn from(r: &kvm_regs) -> Self {
        Regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}
1507
impl From<&Regs> for kvm_regs {
    // Field-for-field copy of the general-purpose register file into the KVM layout.
    fn from(r: &Regs) -> Self {
        kvm_regs {
            rax: r.rax,
            rbx: r.rbx,
            rcx: r.rcx,
            rdx: r.rdx,
            rsi: r.rsi,
            rdi: r.rdi,
            rsp: r.rsp,
            rbp: r.rbp,
            r8: r.r8,
            r9: r.r9,
            r10: r.r10,
            r11: r.r11,
            r12: r.r12,
            r13: r.r13,
            r14: r.r14,
            r15: r.r15,
            rip: r.rip,
            rflags: r.rflags,
        }
    }
}
1532
impl From<&VcpuEvents> for kvm_vcpu_events {
    // Serializes the snapshot form into KVM's kvm_vcpu_events. Each `Option` field
    // sets the matching KVM_VCPUEVENT_VALID_* flag bit only when the value is present,
    // so KVM ignores the fields for absent options on restore.
    fn from(ve: &VcpuEvents) -> Self {
        let mut kvm_ve: kvm_vcpu_events = Default::default();

        kvm_ve.exception.injected = ve.exception.injected as u8;
        kvm_ve.exception.nr = ve.exception.nr;
        kvm_ve.exception.has_error_code = ve.exception.has_error_code as u8;
        if let Some(pending) = ve.exception.pending {
            kvm_ve.exception.pending = pending as u8;
            // The payload is only meaningful when the exception-pending state is valid.
            if ve.exception_payload.is_some() {
                kvm_ve.exception_has_payload = true as u8;
            }
            kvm_ve.exception_payload = ve.exception_payload.unwrap_or(0);
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
        }
        kvm_ve.exception.error_code = ve.exception.error_code;

        kvm_ve.interrupt.injected = ve.interrupt.injected as u8;
        kvm_ve.interrupt.nr = ve.interrupt.nr;
        kvm_ve.interrupt.soft = ve.interrupt.soft as u8;
        if let Some(shadow) = ve.interrupt.shadow {
            kvm_ve.interrupt.shadow = shadow;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SHADOW;
        }

        kvm_ve.nmi.injected = ve.nmi.injected as u8;
        if let Some(pending) = ve.nmi.pending {
            kvm_ve.nmi.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING;
        }
        kvm_ve.nmi.masked = ve.nmi.masked as u8;

        if let Some(sipi_vector) = ve.sipi_vector {
            kvm_ve.sipi_vector = sipi_vector;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR;
        }

        if let Some(smm) = ve.smi.smm {
            kvm_ve.smi.smm = smm as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_SMM;
        }
        kvm_ve.smi.pending = ve.smi.pending as u8;
        kvm_ve.smi.smm_inside_nmi = ve.smi.smm_inside_nmi as u8;
        kvm_ve.smi.latched_init = ve.smi.latched_init;

        if let Some(pending) = ve.triple_fault.pending {
            kvm_ve.triple_fault.pending = pending as u8;
            kvm_ve.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT;
        }
        kvm_ve
    }
}
1585
1586 impl From<&kvm_vcpu_events> for VcpuEvents {
from(ve: &kvm_vcpu_events) -> Self1587 fn from(ve: &kvm_vcpu_events) -> Self {
1588 let exception = VcpuExceptionState {
1589 injected: ve.exception.injected != 0,
1590 nr: ve.exception.nr,
1591 has_error_code: ve.exception.has_error_code != 0,
1592 pending: if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1593 Some(ve.exception.pending != 0)
1594 } else {
1595 None
1596 },
1597 error_code: ve.exception.error_code,
1598 };
1599
1600 let interrupt = VcpuInterruptState {
1601 injected: ve.interrupt.injected != 0,
1602 nr: ve.interrupt.nr,
1603 soft: ve.interrupt.soft != 0,
1604 shadow: if ve.flags & KVM_VCPUEVENT_VALID_SHADOW != 0 {
1605 Some(ve.interrupt.shadow)
1606 } else {
1607 None
1608 },
1609 };
1610
1611 let nmi = VcpuNmiState {
1612 injected: ve.interrupt.injected != 0,
1613 pending: if ve.flags & KVM_VCPUEVENT_VALID_NMI_PENDING != 0 {
1614 Some(ve.nmi.pending != 0)
1615 } else {
1616 None
1617 },
1618 masked: ve.nmi.masked != 0,
1619 };
1620
1621 let sipi_vector = if ve.flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR != 0 {
1622 Some(ve.sipi_vector)
1623 } else {
1624 None
1625 };
1626
1627 let smi = VcpuSmiState {
1628 smm: if ve.flags & KVM_VCPUEVENT_VALID_SMM != 0 {
1629 Some(ve.smi.smm != 0)
1630 } else {
1631 None
1632 },
1633 pending: ve.smi.pending != 0,
1634 smm_inside_nmi: ve.smi.smm_inside_nmi != 0,
1635 latched_init: ve.smi.latched_init,
1636 };
1637
1638 let triple_fault = VcpuTripleFaultState {
1639 pending: if ve.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT != 0 {
1640 Some(ve.triple_fault.pending != 0)
1641 } else {
1642 None
1643 },
1644 };
1645
1646 let exception_payload = if ve.flags & KVM_VCPUEVENT_VALID_PAYLOAD != 0 {
1647 Some(ve.exception_payload)
1648 } else {
1649 None
1650 };
1651
1652 VcpuEvents {
1653 exception,
1654 interrupt,
1655 nmi,
1656 sipi_vector,
1657 smi,
1658 triple_fault,
1659 exception_payload,
1660 }
1661 }
1662 }
1663
impl From<&kvm_segment> for Segment {
    // Field-for-field copy of a segment register descriptor from the KVM layout.
    // KVM's `unusable` flag is not represented in Segment.
    fn from(s: &kvm_segment) -> Self {
        Segment {
            base: s.base,
            limit: s.limit,
            selector: s.selector,
            type_: s.type_,
            present: s.present,
            dpl: s.dpl,
            db: s.db,
            s: s.s,
            l: s.l,
            g: s.g,
            avl: s.avl,
        }
    }
}
1681
1682 impl From<&Segment> for kvm_segment {
from(s: &Segment) -> Self1683 fn from(s: &Segment) -> Self {
1684 kvm_segment {
1685 base: s.base,
1686 limit: s.limit,
1687 selector: s.selector,
1688 type_: s.type_,
1689 present: s.present,
1690 dpl: s.dpl,
1691 db: s.db,
1692 s: s.s,
1693 l: s.l,
1694 g: s.g,
1695 avl: s.avl,
1696 unusable: match s.present {
1697 0 => 1,
1698 _ => 0,
1699 },
1700 ..Default::default()
1701 }
1702 }
1703 }
1704
1705 impl From<&kvm_dtable> for DescriptorTable {
from(dt: &kvm_dtable) -> Self1706 fn from(dt: &kvm_dtable) -> Self {
1707 DescriptorTable {
1708 base: dt.base,
1709 limit: dt.limit,
1710 }
1711 }
1712 }
1713
1714 impl From<&DescriptorTable> for kvm_dtable {
from(dt: &DescriptorTable) -> Self1715 fn from(dt: &DescriptorTable) -> Self {
1716 kvm_dtable {
1717 base: dt.base,
1718 limit: dt.limit,
1719 ..Default::default()
1720 }
1721 }
1722 }
1723
impl From<&kvm_sregs> for Sregs {
    // Converts the full special-register set (segments, descriptor tables, control
    // registers, EFER) from the KVM layout. KVM's interrupt_bitmap and apic_base
    // are not represented in Sregs.
    fn from(r: &kvm_sregs) -> Self {
        Sregs {
            cs: Segment::from(&r.cs),
            ds: Segment::from(&r.ds),
            es: Segment::from(&r.es),
            fs: Segment::from(&r.fs),
            gs: Segment::from(&r.gs),
            ss: Segment::from(&r.ss),
            tr: Segment::from(&r.tr),
            ldt: Segment::from(&r.ldt),
            gdt: DescriptorTable::from(&r.gdt),
            idt: DescriptorTable::from(&r.idt),
            cr0: r.cr0,
            cr2: r.cr2,
            cr3: r.cr3,
            cr4: r.cr4,
            cr8: r.cr8,
            efer: r.efer,
        }
    }
}
1746
impl From<&kvm_fpu> for Fpu {
    // Field-for-field copy of the FPU/SSE state from the KVM layout.
    fn from(r: &kvm_fpu) -> Self {
        Fpu {
            fpr: r.fpr,
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
        }
    }
}
1762
impl From<&Fpu> for kvm_fpu {
    // Field-for-field copy of the FPU/SSE state into the KVM layout; padding
    // fields stay zeroed.
    fn from(r: &Fpu) -> Self {
        kvm_fpu {
            fpr: r.fpr,
            fcw: r.fcw,
            fsw: r.fsw,
            ftwx: r.ftwx,
            last_opcode: r.last_opcode,
            last_ip: r.last_ip,
            last_dp: r.last_dp,
            xmm: r.xmm,
            mxcsr: r.mxcsr,
            ..Default::default()
        }
    }
}
1779
1780 impl From<&kvm_debugregs> for DebugRegs {
from(r: &kvm_debugregs) -> Self1781 fn from(r: &kvm_debugregs) -> Self {
1782 DebugRegs {
1783 db: r.db,
1784 dr6: r.dr6,
1785 dr7: r.dr7,
1786 }
1787 }
1788 }
1789
1790 impl From<&DebugRegs> for kvm_debugregs {
from(r: &DebugRegs) -> Self1791 fn from(r: &DebugRegs) -> Self {
1792 kvm_debugregs {
1793 db: r.db,
1794 dr6: r.dr6,
1795 dr7: r.dr7,
1796 ..Default::default()
1797 }
1798 }
1799 }
1800
#[cfg(test)]
mod tests {
    use super::*;

    // Round-trips a fully-populated kvm_vcpu_events through VcpuEvents and back,
    // checking every field survives both conversions.
    #[test]
    fn vcpu_event_to_from() {
        // All data is random.
        let mut kvm_ve: kvm_vcpu_events = Default::default();
        kvm_ve.exception.injected = 1;
        kvm_ve.exception.nr = 65;
        kvm_ve.exception.has_error_code = 1;
        kvm_ve.exception.error_code = 110;
        kvm_ve.exception.pending = 1;

        kvm_ve.interrupt.injected = 1;
        kvm_ve.interrupt.nr = 100;
        kvm_ve.interrupt.soft = 1;
        kvm_ve.interrupt.shadow = 114;

        kvm_ve.nmi.injected = 1;
        kvm_ve.nmi.pending = 1;
        kvm_ve.nmi.masked = 0;

        kvm_ve.sipi_vector = 105;

        kvm_ve.smi.smm = 1;
        kvm_ve.smi.pending = 1;
        kvm_ve.smi.smm_inside_nmi = 1;
        kvm_ve.smi.latched_init = 100;

        kvm_ve.triple_fault.pending = 0;

        kvm_ve.exception_payload = 33;
        kvm_ve.exception_has_payload = 1;

        // TRIPLE_FAULT is intentionally left out of the valid flags, so the
        // round-trip should produce `None` for triple_fault.pending.
        kvm_ve.flags = 0
            | KVM_VCPUEVENT_VALID_PAYLOAD
            | KVM_VCPUEVENT_VALID_SMM
            | KVM_VCPUEVENT_VALID_NMI_PENDING
            | KVM_VCPUEVENT_VALID_SIPI_VECTOR
            | KVM_VCPUEVENT_VALID_SHADOW;

        let ve: VcpuEvents = VcpuEvents::from(&kvm_ve);
        assert_eq!(ve.exception.injected, true);
        assert_eq!(ve.exception.nr, 65);
        assert_eq!(ve.exception.has_error_code, true);
        assert_eq!(ve.exception.error_code, 110);
        assert_eq!(ve.exception.pending.unwrap(), true);

        assert_eq!(ve.interrupt.injected, true);
        assert_eq!(ve.interrupt.nr, 100);
        assert_eq!(ve.interrupt.soft, true);
        assert_eq!(ve.interrupt.shadow.unwrap(), 114);

        assert_eq!(ve.nmi.injected, true);
        assert_eq!(ve.nmi.pending.unwrap(), true);
        assert_eq!(ve.nmi.masked, false);

        assert_eq!(ve.sipi_vector.unwrap(), 105);

        assert_eq!(ve.smi.smm.unwrap(), true);
        assert_eq!(ve.smi.pending, true);
        assert_eq!(ve.smi.smm_inside_nmi, true);
        assert_eq!(ve.smi.latched_init, 100);

        assert_eq!(ve.triple_fault.pending, None);

        assert_eq!(ve.exception_payload.unwrap(), 33);

        // Convert back and verify the KVM representation is fully restored.
        let kvm_ve_restored: kvm_vcpu_events = kvm_vcpu_events::from(&ve);
        assert_eq!(kvm_ve_restored.exception.injected, 1);
        assert_eq!(kvm_ve_restored.exception.nr, 65);
        assert_eq!(kvm_ve_restored.exception.has_error_code, 1);
        assert_eq!(kvm_ve_restored.exception.error_code, 110);
        assert_eq!(kvm_ve_restored.exception.pending, 1);

        assert_eq!(kvm_ve_restored.interrupt.injected, 1);
        assert_eq!(kvm_ve_restored.interrupt.nr, 100);
        assert_eq!(kvm_ve_restored.interrupt.soft, 1);
        assert_eq!(kvm_ve_restored.interrupt.shadow, 114);

        assert_eq!(kvm_ve_restored.nmi.injected, 1);
        assert_eq!(kvm_ve_restored.nmi.pending, 1);
        assert_eq!(kvm_ve_restored.nmi.masked, 0);

        assert_eq!(kvm_ve_restored.sipi_vector, 105);

        assert_eq!(kvm_ve_restored.smi.smm, 1);
        assert_eq!(kvm_ve_restored.smi.pending, 1);
        assert_eq!(kvm_ve_restored.smi.smm_inside_nmi, 1);
        assert_eq!(kvm_ve_restored.smi.latched_init, 100);

        assert_eq!(kvm_ve_restored.triple_fault.pending, 0);

        assert_eq!(kvm_ve_restored.exception_payload, 33);
        assert_eq!(kvm_ve_restored.exception_has_payload, 1);
    }
}
1899