1 // Copyright 2020 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 use base::IoctlNr;
6 use std::convert::TryInto;
7
8 use libc::E2BIG;
9
10 use base::{
11 errno_result, error, ioctl, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ptr,
12 ioctl_with_ref, ioctl_with_val, AsRawDescriptor, Error, MappedRegion, Result,
13 };
14 use data_model::vec_with_array_field;
15 use kvm_sys::*;
16 use vm_memory::GuestAddress;
17
18 use super::{Kvm, KvmVcpu, KvmVm};
19 use crate::{
20 ClockState, CpuId, CpuIdEntry, DebugRegs, DescriptorTable, DeviceKind, Fpu, HypervisorX86_64,
21 IoapicRedirectionTableEntry, IoapicState, IrqSourceChip, LapicState, PicSelect, PicState,
22 PitChannelState, PitState, Register, Regs, Segment, Sregs, VcpuX86_64, VmCap, VmX86_64,
23 };
24
25 type KvmCpuId = kvm::CpuId;
26
get_cpuid_with_initial_capacity<T: AsRawDescriptor>( descriptor: &T, kind: IoctlNr, initial_capacity: usize, ) -> Result<CpuId>27 fn get_cpuid_with_initial_capacity<T: AsRawDescriptor>(
28 descriptor: &T,
29 kind: IoctlNr,
30 initial_capacity: usize,
31 ) -> Result<CpuId> {
32 let mut entries: usize = initial_capacity;
33
34 loop {
35 let mut kvm_cpuid = KvmCpuId::new(entries);
36
37 let ret = unsafe {
38 // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the
39 // memory allocated for the struct. The limit is read from nent within KvmCpuId,
40 // which is set to the allocated size above.
41 ioctl_with_mut_ptr(descriptor, kind, kvm_cpuid.as_mut_ptr())
42 };
43 if ret < 0 {
44 let err = Error::last();
45 match err.errno() {
46 E2BIG => {
47 // double the available memory for cpuid entries for kvm.
48 if let Some(val) = entries.checked_mul(2) {
49 entries = val;
50 } else {
51 return Err(err);
52 }
53 }
54 _ => return Err(err),
55 }
56 } else {
57 return Ok(CpuId::from(&kvm_cpuid));
58 }
59 }
60 }
61
impl Kvm {
    /// Queries cpuid information identified by the ioctl `kind` (supported or emulated
    /// leaves), growing the entry buffer on demand.
    pub fn get_cpuid(&self, kind: IoctlNr) -> Result<CpuId> {
        // Initial guess for the number of entries; get_cpuid_with_initial_capacity doubles
        // it as needed when the kernel returns E2BIG.
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, kind, KVM_MAX_ENTRIES)
    }
}
68
impl HypervisorX86_64 for Kvm {
    /// Returns the CPUID leaves KVM can expose to a guest (KVM_GET_SUPPORTED_CPUID).
    fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID())
    }

    /// Returns the CPUID leaves KVM emulates in software (KVM_GET_EMULATED_CPUID).
    fn get_emulated_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_EMULATED_CPUID())
    }

    /// Returns the list of MSR indices supported by KVM (KVM_GET_MSR_INDEX_LIST).
    fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = unsafe {
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nmsrs, which is set to the allocated
            // size (MAX_KVM_MSR_ENTRIES) above.
            ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0])
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list[0].nmsrs;

        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        let indices: &[u32] = unsafe {
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                // Clamp a (misbehaving) kernel-reported count to what we actually allocated.
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}
108
impl KvmVm {
    /// Checks if a particular `VmCap` is available, or returns None if arch-independent
    /// Vm.check_capability() should handle the check.
    pub fn check_capability_arch(&self, c: VmCap) -> Option<bool> {
        match c {
            // KVM on x86_64 always supports the paravirtualized clock.
            VmCap::PvClock => Some(true),
            _ => None,
        }
    }

    /// Returns the params to pass to KVM_CREATE_DEVICE for a `kind` device on this arch, or None to
    /// let the arch-independent `KvmVm::create_device` handle it.
    pub fn get_device_params_arch(&self, _kind: DeviceKind) -> Option<kvm_create_device> {
        // No device kind needs x86-specific creation parameters.
        None
    }

    /// Arch-specific implementation of `Vm::get_pvclock`.
    pub fn get_pvclock_arch(&self) -> Result<ClockState> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write correct
        // amount of memory to our pointer, and we verify the return result.
        let mut clock_data: kvm_clock_data = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) };
        if ret == 0 {
            Ok(ClockState::from(clock_data))
        } else {
            errno_result()
        }
    }

    /// Arch-specific implementation of `Vm::set_pvclock`.
    pub fn set_pvclock_arch(&self, state: &ClockState) -> Result<()> {
        let clock_data = kvm_clock_data::from(*state);
        // Safe because we know that our file is a VM fd, we know the kernel will only read correct
        // amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), &clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of given interrupt controller by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_pic_state(&self, id: PicSelect) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not IOAPIC.
                irqchip_state.chip.pic
            })
        } else {
            errno_result()
        }
    }

    /// Sets the state of given interrupt controller by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_pic_state(&self, id: PicSelect, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            chip_id: id as u32,
            ..Default::default()
        };
        irqchip_state.chip.pic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of IOAPIC by issuing KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        let mut irqchip_state = kvm_irqchip {
            // chip_id 2 selects the IOAPIC (0 and 1 are the primary/secondary PICs).
            chip_id: 2,
            ..Default::default()
        };
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not PIC.
                irqchip_state.chip.ioapic
            })
        } else {
            errno_result()
        }
    }

    /// Sets the state of IOAPIC by issuing KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip {
            // chip_id 2 selects the IOAPIC (0 and 1 are the primary/secondary PICs).
            chip_id: 2,
            ..Default::default()
        };
        irqchip_state.chip.ioapic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of PIT by issuing KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write
        // correct amount of memory to our pointer, and we verify the return result.
        let mut pit_state = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of PIT by issuing KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // Safe because we know that our file is a VM fd, we know the kernel will only read
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Enable support for split-irqchip.
    pub fn enable_split_irqchip(&self, ioapic_pins: usize) -> Result<()> {
        let mut cap = kvm_enable_cap {
            cap: KVM_CAP_SPLIT_IRQCHIP,
            ..Default::default()
        };
        // args[0] tells KVM how many IOAPIC pins to reserve for userspace routing.
        cap.args[0] = ioapic_pins as u64;
        // safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), &cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }
}
298
impl VmX86_64 for KvmVm {
    /// Returns the hypervisor (KVM) backing this VM.
    fn get_hypervisor(&self) -> &dyn HypervisorX86_64 {
        &self.kvm
    }

    /// Creates a vcpu with the given `id` and returns it as a `VcpuX86_64` trait object.
    fn create_vcpu(&self, id: usize) -> Result<Box<dyn VcpuX86_64>> {
        // create_vcpu is declared separately in VmAArch64 and VmX86, so it can return VcpuAArch64
        // or VcpuX86. But both use the same implementation in KvmVm::create_vcpu.
        Ok(Box::new(KvmVm::create_vcpu(self, id)?))
    }

    /// Sets the address of the three-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
    fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr.offset() as u64) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the address of a one-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
    fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret =
            unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &(addr.offset() as u64)) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
337
338 impl KvmVcpu {
339 /// Arch-specific implementation of `Vcpu::pvclock_ctrl`.
pvclock_ctrl_arch(&self) -> Result<()>340 pub fn pvclock_ctrl_arch(&self) -> Result<()> {
341 let ret = unsafe {
342 // The ioctl is safe because it does not read or write memory in this process.
343 ioctl(self, KVM_KVMCLOCK_CTRL())
344 };
345 if ret == 0 {
346 Ok(())
347 } else {
348 errno_result()
349 }
350 }
351 }
352
impl VcpuX86_64 for KvmVcpu {
    /// Signals to the kernel (via the shared kvm_run page) whether userspace wants to be
    /// notified when the guest can accept an interrupt.
    #[allow(clippy::cast_ptr_alignment)]
    fn set_interrupt_window_requested(&self, requested: bool) {
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.request_interrupt_window = if requested { 1 } else { 0 };
    }

    /// Returns true when the guest is able to receive an interrupt injection (both the
    /// kernel-reported readiness flag and the guest's IF flag are set).
    #[allow(clippy::cast_ptr_alignment)]
    fn ready_for_interrupt(&self) -> bool {
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        run.ready_for_interrupt_injection != 0 && run.if_flag != 0
    }

    /// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
    ///
    /// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
    /// ChromeOS doesn't support PPC or MIPS.
    fn interrupt(&self, irq: u32) -> Result<()> {
        let interrupt = kvm_interrupt { irq };
        // safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct
        let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT(), &interrupt) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Injects a non-maskable interrupt into the vcpu via KVM_NMI.
    fn inject_nmi(&self) -> Result<()> {
        // Safe because we know that our file is a VCPU fd.
        let ret = unsafe { ioctl(self, KVM_NMI()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the general-purpose registers via KVM_GET_REGS.
    fn get_regs(&self) -> Result<Regs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let mut regs: kvm_regs = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) };
        if ret == 0 {
            Ok(Regs::from(&regs))
        } else {
            errno_result()
        }
    }

    /// Writes the general-purpose registers via KVM_SET_REGS.
    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let regs = kvm_regs::from(regs);
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), &regs) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the special registers (segment/control registers) via KVM_GET_SREGS.
    fn get_sregs(&self) -> Result<Sregs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs: kvm_sregs = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) };
        if ret == 0 {
            Ok(Sregs::from(&regs))
        } else {
            errno_result()
        }
    }

    /// Writes the special registers (segment/control registers) via KVM_SET_SREGS.
    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        let sregs = kvm_sregs::from(sregs);
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), &sregs) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the FPU state via KVM_GET_FPU.
    fn get_fpu(&self) -> Result<Fpu> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut fpu: kvm_fpu = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut fpu) };
        if ret == 0 {
            Ok(Fpu::from(&fpu))
        } else {
            errno_result()
        }
    }

    /// Writes the FPU state via KVM_SET_FPU.
    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let fpu = kvm_fpu::from(fpu);
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_fpu struct.
            ioctl_with_ref(self, KVM_SET_FPU(), &fpu)
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the debug registers via KVM_GET_DEBUGREGS.
    fn get_debugregs(&self) -> Result<DebugRegs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs: kvm_debugregs = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) };
        if ret == 0 {
            Ok(DebugRegs::from(&regs))
        } else {
            errno_result()
        }
    }

    /// Writes the debug registers via KVM_SET_DEBUGREGS.
    fn set_debugregs(&self, dregs: &DebugRegs) -> Result<()> {
        let dregs = kvm_debugregs::from(dregs);
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_debugregs struct.
            ioctl_with_ref(self, KVM_SET_DEBUGREGS(), &dregs)
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the extended control registers via KVM_GET_XCRS.
    fn get_xcrs(&self) -> Result<Vec<Register>> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs: kvm_xcrs = Default::default();
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) };
        if ret == 0 {
            Ok(from_kvm_xcrs(&regs))
        } else {
            errno_result()
        }
    }

    /// Writes the extended control registers via KVM_SET_XCRS.
    fn set_xcrs(&self, xcrs: &[Register]) -> Result<()> {
        let xcrs = to_kvm_xcrs(xcrs);
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_xcrs struct.
            ioctl_with_ref(self, KVM_SET_XCRS(), &xcrs)
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Reads the MSRs whose ids are listed in `vec`, replacing `vec`'s contents with the
    /// values the kernel returned (KVM_GET_MSRS).
    fn get_msrs(&self, vec: &mut Vec<Register>) -> Result<()> {
        let msrs = to_kvm_msrs(vec);
        let ret = unsafe {
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0])
        };
        // KVM_GET_MSRS actually returns the number of msr entries written.
        if ret < 0 {
            return errno_result();
        }
        // Safe because we trust the kernel to return the correct array length on success.
        let entries = unsafe {
            let count = ret as usize;
            assert!(count <= vec.len());
            msrs[0].entries.as_slice(count)
        };
        vec.truncate(0);
        vec.extend(entries.iter().map(|e| Register {
            id: e.index,
            value: e.data,
        }));
        Ok(())
    }

    /// Writes the MSRs in `vec` via KVM_SET_MSRS.
    fn set_msrs(&self, vec: &[Register]) -> Result<()> {
        let msrs = to_kvm_msrs(vec);
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_msrs struct.
            ioctl_with_ref(self, KVM_SET_MSRS(), &msrs[0])
        };
        // KVM_SET_MSRS actually returns the number of msr entries written.
        if ret >= 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the vcpu's CPUID table via KVM_SET_CPUID2.
    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        let cpuid = KvmCpuId::from(cpuid);
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_cpuid2 struct.
            ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr())
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Queries the Hyper-V CPUID leaves for this vcpu (KVM_GET_SUPPORTED_HV_CPUID).
    fn get_hyperv_cpuid(&self) -> Result<CpuId> {
        const KVM_MAX_ENTRIES: usize = 256;
        get_cpuid_with_initial_capacity(self, KVM_GET_SUPPORTED_HV_CPUID(), KVM_MAX_ENTRIES)
    }

    /// Configures hardware breakpoints (up to 4) and optional single-stepping for guest
    /// debugging via KVM_SET_GUEST_DEBUG.
    fn set_guest_debug(&self, addrs: &[GuestAddress], enable_singlestep: bool) -> Result<()> {
        use kvm_sys::*;
        let mut dbg: kvm_guest_debug = Default::default();

        // x86 exposes exactly 4 hardware breakpoint slots (DR0-DR3).
        if addrs.len() > 4 {
            error!(
                "Support 4 breakpoints at most but {} addresses are passed",
                addrs.len()
            );
            return Err(base::Error::new(libc::EINVAL));
        }

        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
        if enable_singlestep {
            dbg.control |= KVM_GUESTDBG_SINGLESTEP;
        }

        // Set bits 9 and 10.
        // bit 9: GE (global exact breakpoint enable) flag.
        // bit 10: always 1.
        dbg.arch.debugreg[7] = 0x0600;

        for (i, addr) in addrs.iter().enumerate() {
            dbg.arch.debugreg[i] = addr.0;
            // Set global breakpoint enable flag
            dbg.arch.debugreg[7] |= 2 << (i * 2);
        }

        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_guest_debug struct.
            ioctl_with_ref(self, KVM_SET_GUEST_DEBUG(), &dbg)
        };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}
616
impl KvmVcpu {
    /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt Controller".
    ///
    /// See the documentation for KVM_GET_LAPIC.
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = unsafe {
            // The ioctl is unsafe unless you trust the kernel not to write past the end of the
            // local_apic struct.
            ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt Controller".
    ///
    /// See the documentation for KVM_SET_LAPIC.
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = unsafe {
            // The ioctl is safe because the kernel will only read from the klapic struct.
            ioctl_with_ref(self, KVM_SET_LAPIC(), klapic)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }
}
649
650 impl<'a> From<&'a KvmCpuId> for CpuId {
from(kvm_cpuid: &'a KvmCpuId) -> CpuId651 fn from(kvm_cpuid: &'a KvmCpuId) -> CpuId {
652 let kvm_entries = kvm_cpuid.entries_slice();
653 let mut cpu_id_entries = Vec::with_capacity(kvm_entries.len());
654
655 for entry in kvm_entries {
656 let cpu_id_entry = CpuIdEntry {
657 function: entry.function,
658 index: entry.index,
659 flags: entry.flags,
660 eax: entry.eax,
661 ebx: entry.ebx,
662 ecx: entry.ecx,
663 edx: entry.edx,
664 };
665 cpu_id_entries.push(cpu_id_entry)
666 }
667 CpuId { cpu_id_entries }
668 }
669 }
670
671 impl From<&CpuId> for KvmCpuId {
from(cpuid: &CpuId) -> KvmCpuId672 fn from(cpuid: &CpuId) -> KvmCpuId {
673 let mut kvm = KvmCpuId::new(cpuid.cpu_id_entries.len());
674 let entries = kvm.mut_entries_slice();
675 for (i, &e) in cpuid.cpu_id_entries.iter().enumerate() {
676 entries[i] = kvm_cpuid_entry2 {
677 function: e.function,
678 index: e.index,
679 flags: e.flags,
680 eax: e.eax,
681 ebx: e.ebx,
682 ecx: e.ecx,
683 edx: e.edx,
684 ..Default::default()
685 };
686 }
687 kvm
688 }
689 }
690
691 impl From<ClockState> for kvm_clock_data {
from(state: ClockState) -> Self692 fn from(state: ClockState) -> Self {
693 kvm_clock_data {
694 clock: state.clock,
695 flags: state.flags,
696 ..Default::default()
697 }
698 }
699 }
700
701 impl From<kvm_clock_data> for ClockState {
from(clock_data: kvm_clock_data) -> Self702 fn from(clock_data: kvm_clock_data) -> Self {
703 ClockState {
704 clock: clock_data.clock,
705 flags: clock_data.flags,
706 }
707 }
708 }
709
710 impl From<&kvm_pic_state> for PicState {
from(item: &kvm_pic_state) -> Self711 fn from(item: &kvm_pic_state) -> Self {
712 PicState {
713 last_irr: item.last_irr,
714 irr: item.irr,
715 imr: item.imr,
716 isr: item.isr,
717 priority_add: item.priority_add,
718 irq_base: item.irq_base,
719 read_reg_select: item.read_reg_select != 0,
720 poll: item.poll != 0,
721 special_mask: item.special_mask != 0,
722 init_state: item.init_state.into(),
723 auto_eoi: item.auto_eoi != 0,
724 rotate_on_auto_eoi: item.rotate_on_auto_eoi != 0,
725 special_fully_nested_mode: item.special_fully_nested_mode != 0,
726 use_4_byte_icw: item.init4 != 0,
727 elcr: item.elcr,
728 elcr_mask: item.elcr_mask,
729 }
730 }
731 }
732
733 impl From<&PicState> for kvm_pic_state {
from(item: &PicState) -> Self734 fn from(item: &PicState) -> Self {
735 kvm_pic_state {
736 last_irr: item.last_irr,
737 irr: item.irr,
738 imr: item.imr,
739 isr: item.isr,
740 priority_add: item.priority_add,
741 irq_base: item.irq_base,
742 read_reg_select: item.read_reg_select as u8,
743 poll: item.poll as u8,
744 special_mask: item.special_mask as u8,
745 init_state: item.init_state as u8,
746 auto_eoi: item.auto_eoi as u8,
747 rotate_on_auto_eoi: item.rotate_on_auto_eoi as u8,
748 special_fully_nested_mode: item.special_fully_nested_mode as u8,
749 init4: item.use_4_byte_icw as u8,
750 elcr: item.elcr,
751 elcr_mask: item.elcr_mask,
752 }
753 }
754 }
755
756 impl From<&kvm_ioapic_state> for IoapicState {
from(item: &kvm_ioapic_state) -> Self757 fn from(item: &kvm_ioapic_state) -> Self {
758 let mut state = IoapicState {
759 base_address: item.base_address,
760 ioregsel: item.ioregsel as u8,
761 ioapicid: item.id,
762 current_interrupt_level_bitmap: item.irr,
763 redirect_table: [IoapicRedirectionTableEntry::default(); 24],
764 };
765 for (in_state, out_state) in item.redirtbl.iter().zip(state.redirect_table.iter_mut()) {
766 *out_state = in_state.into();
767 }
768 state
769 }
770 }
771
772 impl From<&IoapicRedirectionTableEntry> for kvm_ioapic_state__bindgen_ty_1 {
from(item: &IoapicRedirectionTableEntry) -> Self773 fn from(item: &IoapicRedirectionTableEntry) -> Self {
774 kvm_ioapic_state__bindgen_ty_1 {
775 // IoapicRedirectionTableEntry layout matches the exact bit layout of a hardware
776 // ioapic redirection table entry, so we can simply do a 64-bit copy
777 bits: item.get(0, 64),
778 }
779 }
780 }
781
782 impl From<&kvm_ioapic_state__bindgen_ty_1> for IoapicRedirectionTableEntry {
from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self783 fn from(item: &kvm_ioapic_state__bindgen_ty_1) -> Self {
784 let mut entry = IoapicRedirectionTableEntry::default();
785 // Safe because the 64-bit layout of the IoapicRedirectionTableEntry matches the kvm_sys
786 // table entry layout
787 entry.set(0, 64, unsafe { item.bits as u64 });
788 entry
789 }
790 }
791
792 impl From<&IoapicState> for kvm_ioapic_state {
from(item: &IoapicState) -> Self793 fn from(item: &IoapicState) -> Self {
794 let mut state = kvm_ioapic_state {
795 base_address: item.base_address,
796 ioregsel: item.ioregsel as u32,
797 id: item.ioapicid,
798 irr: item.current_interrupt_level_bitmap,
799 ..Default::default()
800 };
801 for (in_state, out_state) in item.redirect_table.iter().zip(state.redirtbl.iter_mut()) {
802 *out_state = in_state.into();
803 }
804 state
805 }
806 }
807
808 impl From<&LapicState> for kvm_lapic_state {
from(item: &LapicState) -> Self809 fn from(item: &LapicState) -> Self {
810 let mut state = kvm_lapic_state::default();
811 // There are 64 lapic registers
812 for (reg, value) in item.regs.iter().enumerate() {
813 // Each lapic register is 16 bytes, but only the first 4 are used
814 let reg_offset = 16 * reg;
815 let sliceu8 = unsafe {
816 // This array is only accessed as parts of a u32 word, so interpret it as a u8 array.
817 // to_le_bytes() produces an array of u8, not i8(c_char).
818 std::mem::transmute::<&mut [i8], &mut [u8]>(
819 &mut state.regs[reg_offset..reg_offset + 4],
820 )
821 };
822 sliceu8.copy_from_slice(&value.to_le_bytes());
823 }
824 state
825 }
826 }
827
828 impl From<&kvm_lapic_state> for LapicState {
from(item: &kvm_lapic_state) -> Self829 fn from(item: &kvm_lapic_state) -> Self {
830 let mut state = LapicState { regs: [0; 64] };
831 // There are 64 lapic registers
832 for reg in 0..64 {
833 // Each lapic register is 16 bytes, but only the first 4 are used
834 let reg_offset = 16 * reg;
835 let bytes = unsafe {
836 // This array is only accessed as parts of a u32 word, so interpret it as a u8 array.
837 // from_le_bytes() only works on arrays of u8, not i8(c_char).
838 std::mem::transmute::<&[i8], &[u8]>(&item.regs[reg_offset..reg_offset + 4])
839 };
840 state.regs[reg] = u32::from_le_bytes(bytes.try_into().unwrap());
841 }
842 state
843 }
844 }
845
846 impl From<&PitState> for kvm_pit_state2 {
from(item: &PitState) -> Self847 fn from(item: &PitState) -> Self {
848 kvm_pit_state2 {
849 channels: [
850 kvm_pit_channel_state::from(&item.channels[0]),
851 kvm_pit_channel_state::from(&item.channels[1]),
852 kvm_pit_channel_state::from(&item.channels[2]),
853 ],
854 flags: item.flags,
855 ..Default::default()
856 }
857 }
858 }
859
860 impl From<&kvm_pit_state2> for PitState {
from(item: &kvm_pit_state2) -> Self861 fn from(item: &kvm_pit_state2) -> Self {
862 PitState {
863 channels: [
864 PitChannelState::from(&item.channels[0]),
865 PitChannelState::from(&item.channels[1]),
866 PitChannelState::from(&item.channels[2]),
867 ],
868 flags: item.flags,
869 }
870 }
871 }
872
873 impl From<&PitChannelState> for kvm_pit_channel_state {
from(item: &PitChannelState) -> Self874 fn from(item: &PitChannelState) -> Self {
875 kvm_pit_channel_state {
876 count: item.count,
877 latched_count: item.latched_count,
878 count_latched: item.count_latched as u8,
879 status_latched: item.status_latched as u8,
880 status: item.status,
881 read_state: item.read_state as u8,
882 write_state: item.write_state as u8,
883 // kvm's write_latch only stores the low byte of the reload value
884 write_latch: item.reload_value as u8,
885 rw_mode: item.rw_mode as u8,
886 mode: item.mode,
887 bcd: item.bcd as u8,
888 gate: item.gate as u8,
889 count_load_time: item.count_load_time as i64,
890 }
891 }
892 }
893
894 impl From<&kvm_pit_channel_state> for PitChannelState {
from(item: &kvm_pit_channel_state) -> Self895 fn from(item: &kvm_pit_channel_state) -> Self {
896 PitChannelState {
897 count: item.count,
898 latched_count: item.latched_count,
899 count_latched: item.count_latched.into(),
900 status_latched: item.status_latched != 0,
901 status: item.status,
902 read_state: item.read_state.into(),
903 write_state: item.write_state.into(),
904 // kvm's write_latch only stores the low byte of the reload value
905 reload_value: item.write_latch as u16,
906 rw_mode: item.rw_mode.into(),
907 mode: item.mode,
908 bcd: item.bcd != 0,
909 gate: item.gate != 0,
910 count_load_time: item.count_load_time as u64,
911 }
912 }
913 }
914
// This function translates an IrqSourceChip to the kvm u32 equivalent. It has a different
916 // implementation between x86_64 and aarch64 because the irqchip KVM constants are not defined on
917 // all architectures.
chip_to_kvm_chip(chip: IrqSourceChip) -> u32918 pub(super) fn chip_to_kvm_chip(chip: IrqSourceChip) -> u32 {
919 match chip {
920 IrqSourceChip::PicPrimary => KVM_IRQCHIP_PIC_MASTER,
921 IrqSourceChip::PicSecondary => KVM_IRQCHIP_PIC_SLAVE,
922 IrqSourceChip::Ioapic => KVM_IRQCHIP_IOAPIC,
923 _ => {
924 error!("Invalid IrqChipSource for X86 {:?}", chip);
925 0
926 }
927 }
928 }
929
930 impl From<&kvm_regs> for Regs {
from(r: &kvm_regs) -> Self931 fn from(r: &kvm_regs) -> Self {
932 Regs {
933 rax: r.rax,
934 rbx: r.rbx,
935 rcx: r.rcx,
936 rdx: r.rdx,
937 rsi: r.rsi,
938 rdi: r.rdi,
939 rsp: r.rsp,
940 rbp: r.rbp,
941 r8: r.r8,
942 r9: r.r9,
943 r10: r.r10,
944 r11: r.r11,
945 r12: r.r12,
946 r13: r.r13,
947 r14: r.r14,
948 r15: r.r15,
949 rip: r.rip,
950 rflags: r.rflags,
951 }
952 }
953 }
954
955 impl From<&Regs> for kvm_regs {
from(r: &Regs) -> Self956 fn from(r: &Regs) -> Self {
957 kvm_regs {
958 rax: r.rax,
959 rbx: r.rbx,
960 rcx: r.rcx,
961 rdx: r.rdx,
962 rsi: r.rsi,
963 rdi: r.rdi,
964 rsp: r.rsp,
965 rbp: r.rbp,
966 r8: r.r8,
967 r9: r.r9,
968 r10: r.r10,
969 r11: r.r11,
970 r12: r.r12,
971 r13: r.r13,
972 r14: r.r14,
973 r15: r.r15,
974 rip: r.rip,
975 rflags: r.rflags,
976 }
977 }
978 }
979
980 impl From<&kvm_segment> for Segment {
from(s: &kvm_segment) -> Self981 fn from(s: &kvm_segment) -> Self {
982 Segment {
983 base: s.base,
984 limit: s.limit,
985 selector: s.selector,
986 type_: s.type_,
987 present: s.present,
988 dpl: s.dpl,
989 db: s.db,
990 s: s.s,
991 l: s.l,
992 g: s.g,
993 avl: s.avl,
994 }
995 }
996 }
997
998 impl From<&Segment> for kvm_segment {
from(s: &Segment) -> Self999 fn from(s: &Segment) -> Self {
1000 kvm_segment {
1001 base: s.base,
1002 limit: s.limit,
1003 selector: s.selector,
1004 type_: s.type_,
1005 present: s.present,
1006 dpl: s.dpl,
1007 db: s.db,
1008 s: s.s,
1009 l: s.l,
1010 g: s.g,
1011 avl: s.avl,
1012 unusable: match s.present {
1013 0 => 1,
1014 _ => 0,
1015 },
1016 ..Default::default()
1017 }
1018 }
1019 }
1020
1021 impl From<&kvm_dtable> for DescriptorTable {
from(dt: &kvm_dtable) -> Self1022 fn from(dt: &kvm_dtable) -> Self {
1023 DescriptorTable {
1024 base: dt.base,
1025 limit: dt.limit,
1026 }
1027 }
1028 }
1029
1030 impl From<&DescriptorTable> for kvm_dtable {
from(dt: &DescriptorTable) -> Self1031 fn from(dt: &DescriptorTable) -> Self {
1032 kvm_dtable {
1033 base: dt.base,
1034 limit: dt.limit,
1035 ..Default::default()
1036 }
1037 }
1038 }
1039
1040 impl From<&kvm_sregs> for Sregs {
from(r: &kvm_sregs) -> Self1041 fn from(r: &kvm_sregs) -> Self {
1042 Sregs {
1043 cs: Segment::from(&r.cs),
1044 ds: Segment::from(&r.ds),
1045 es: Segment::from(&r.es),
1046 fs: Segment::from(&r.fs),
1047 gs: Segment::from(&r.gs),
1048 ss: Segment::from(&r.ss),
1049 tr: Segment::from(&r.tr),
1050 ldt: Segment::from(&r.ldt),
1051 gdt: DescriptorTable::from(&r.gdt),
1052 idt: DescriptorTable::from(&r.idt),
1053 cr0: r.cr0,
1054 cr2: r.cr2,
1055 cr3: r.cr3,
1056 cr4: r.cr4,
1057 cr8: r.cr8,
1058 efer: r.efer,
1059 apic_base: r.apic_base,
1060 interrupt_bitmap: r.interrupt_bitmap,
1061 }
1062 }
1063 }
1064
1065 impl From<&Sregs> for kvm_sregs {
from(r: &Sregs) -> Self1066 fn from(r: &Sregs) -> Self {
1067 kvm_sregs {
1068 cs: kvm_segment::from(&r.cs),
1069 ds: kvm_segment::from(&r.ds),
1070 es: kvm_segment::from(&r.es),
1071 fs: kvm_segment::from(&r.fs),
1072 gs: kvm_segment::from(&r.gs),
1073 ss: kvm_segment::from(&r.ss),
1074 tr: kvm_segment::from(&r.tr),
1075 ldt: kvm_segment::from(&r.ldt),
1076 gdt: kvm_dtable::from(&r.gdt),
1077 idt: kvm_dtable::from(&r.idt),
1078 cr0: r.cr0,
1079 cr2: r.cr2,
1080 cr3: r.cr3,
1081 cr4: r.cr4,
1082 cr8: r.cr8,
1083 efer: r.efer,
1084 apic_base: r.apic_base,
1085 interrupt_bitmap: r.interrupt_bitmap,
1086 }
1087 }
1088 }
1089
1090 impl From<&kvm_fpu> for Fpu {
from(r: &kvm_fpu) -> Self1091 fn from(r: &kvm_fpu) -> Self {
1092 Fpu {
1093 fpr: r.fpr,
1094 fcw: r.fcw,
1095 fsw: r.fsw,
1096 ftwx: r.ftwx,
1097 last_opcode: r.last_opcode,
1098 last_ip: r.last_ip,
1099 last_dp: r.last_dp,
1100 xmm: r.xmm,
1101 mxcsr: r.mxcsr,
1102 }
1103 }
1104 }
1105
1106 impl From<&Fpu> for kvm_fpu {
from(r: &Fpu) -> Self1107 fn from(r: &Fpu) -> Self {
1108 kvm_fpu {
1109 fpr: r.fpr,
1110 fcw: r.fcw,
1111 fsw: r.fsw,
1112 ftwx: r.ftwx,
1113 last_opcode: r.last_opcode,
1114 last_ip: r.last_ip,
1115 last_dp: r.last_dp,
1116 xmm: r.xmm,
1117 mxcsr: r.mxcsr,
1118 ..Default::default()
1119 }
1120 }
1121 }
1122
1123 impl From<&kvm_debugregs> for DebugRegs {
from(r: &kvm_debugregs) -> Self1124 fn from(r: &kvm_debugregs) -> Self {
1125 DebugRegs {
1126 db: r.db,
1127 dr6: r.dr6,
1128 dr7: r.dr7,
1129 }
1130 }
1131 }
1132
1133 impl From<&DebugRegs> for kvm_debugregs {
from(r: &DebugRegs) -> Self1134 fn from(r: &DebugRegs) -> Self {
1135 kvm_debugregs {
1136 db: r.db,
1137 dr6: r.dr6,
1138 dr7: r.dr7,
1139 ..Default::default()
1140 }
1141 }
1142 }
1143
from_kvm_xcrs(r: &kvm_xcrs) -> Vec<Register>1144 fn from_kvm_xcrs(r: &kvm_xcrs) -> Vec<Register> {
1145 r.xcrs
1146 .iter()
1147 .take(r.nr_xcrs as usize)
1148 .map(|x| Register {
1149 id: x.xcr,
1150 value: x.value,
1151 })
1152 .collect()
1153 }
1154
to_kvm_xcrs(r: &[Register]) -> kvm_xcrs1155 fn to_kvm_xcrs(r: &[Register]) -> kvm_xcrs {
1156 let mut kvm = kvm_xcrs {
1157 nr_xcrs: r.len() as u32,
1158 ..Default::default()
1159 };
1160 for (i, &xcr) in r.iter().enumerate() {
1161 kvm.xcrs[i].xcr = xcr.id as u32;
1162 kvm.xcrs[i].value = xcr.value;
1163 }
1164 kvm
1165 }
1166
to_kvm_msrs(vec: &[Register]) -> Vec<kvm_msrs>1167 fn to_kvm_msrs(vec: &[Register]) -> Vec<kvm_msrs> {
1168 let vec: Vec<kvm_msr_entry> = vec
1169 .iter()
1170 .map(|e| kvm_msr_entry {
1171 index: e.id as u32,
1172 data: e.value,
1173 ..Default::default()
1174 })
1175 .collect();
1176
1177 let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(vec.len());
1178 unsafe {
1179 // Mapping the unsized array to a slice is unsafe because the length isn't known.
1180 // Providing the length used to create the struct guarantees the entire slice is valid.
1181 msrs[0]
1182 .entries
1183 .as_mut_slice(vec.len())
1184 .copy_from_slice(&vec);
1185 }
1186 msrs[0].nmsrs = vec.len() as u32;
1187 msrs
1188 }
1189
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        DeliveryMode, DeliveryStatus, DestinationMode, Hypervisor, HypervisorCap, HypervisorX86_64,
        IoapicRedirectionTableEntry, IoapicState, IrqRoute, IrqSource, IrqSourceChip, LapicState,
        PicInitState, PicState, PitChannelState, PitRWMode, PitRWState, PitState, TriggerMode,
        Vcpu, Vm,
    };
    use libc::EINVAL;
    use vm_memory::{GuestAddress, GuestMemory};

    #[test]
    fn get_supported_cpuid() {
        let hypervisor = Kvm::new().unwrap();
        let cpuid = hypervisor.get_supported_cpuid().unwrap();
        assert!(!cpuid.cpu_id_entries.is_empty());
    }

    #[test]
    fn get_emulated_cpuid() {
        let hypervisor = Kvm::new().unwrap();
        let cpuid = hypervisor.get_emulated_cpuid().unwrap();
        assert!(!cpuid.cpu_id_entries.is_empty());
    }

    #[test]
    fn get_msr_index_list() {
        let kvm = Kvm::new().unwrap();
        let msr_list = kvm.get_msr_index_list().unwrap();
        assert!(msr_list.len() >= 2);
    }

    #[test]
    fn entries_double_on_error() {
        let hypervisor = Kvm::new().unwrap();
        let cpuid =
            get_cpuid_with_initial_capacity(&hypervisor, KVM_GET_SUPPORTED_CPUID(), 4).unwrap();
        assert!(cpuid.cpu_id_entries.len() > 4);
    }

    #[test]
    fn check_vm_arch_capability() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x1000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        assert!(vm.check_capability(VmCap::PvClock));
    }

    #[test]
    fn pic_state() {
        let state = PicState {
            last_irr: 0b00000001,
            irr: 0b00000010,
            imr: 0b00000100,
            isr: 0b00001000,
            priority_add: 0b00010000,
            irq_base: 0b00100000,
            read_reg_select: false,
            poll: true,
            special_mask: true,
            init_state: PicInitState::Icw3,
            auto_eoi: true,
            rotate_on_auto_eoi: false,
            special_fully_nested_mode: true,
            use_4_byte_icw: true,
            elcr: 0b01000000,
            elcr_mask: 0b10000000,
        };

        let kvm_state = kvm_pic_state::from(&state);

        assert_eq!(kvm_state.last_irr, 0b00000001);
        assert_eq!(kvm_state.irr, 0b00000010);
        assert_eq!(kvm_state.imr, 0b00000100);
        assert_eq!(kvm_state.isr, 0b00001000);
        assert_eq!(kvm_state.priority_add, 0b00010000);
        assert_eq!(kvm_state.irq_base, 0b00100000);
        assert_eq!(kvm_state.read_reg_select, 0);
        assert_eq!(kvm_state.poll, 1);
        assert_eq!(kvm_state.special_mask, 1);
        assert_eq!(kvm_state.init_state, 0b10);
        assert_eq!(kvm_state.auto_eoi, 1);
        assert_eq!(kvm_state.rotate_on_auto_eoi, 0);
        assert_eq!(kvm_state.special_fully_nested_mode, 1);
        // Fix: this assertion previously duplicated the auto_eoi check above,
        // leaving use_4_byte_icw (kvm's init4 field) untested.
        assert_eq!(kvm_state.init4, 1);
        assert_eq!(kvm_state.elcr, 0b01000000);
        assert_eq!(kvm_state.elcr_mask, 0b10000000);

        let orig_state = PicState::from(&kvm_state);
        assert_eq!(state, orig_state);
    }

    #[test]
    fn ioapic_state() {
        let mut entry = IoapicRedirectionTableEntry::default();
        // default entry should be 0
        assert_eq!(entry.get(0, 64), 0);

        // set some values on our entry
        entry.set_vector(0b11111111);
        entry.set_delivery_mode(DeliveryMode::SMI);
        entry.set_dest_mode(DestinationMode::Physical);
        entry.set_delivery_status(DeliveryStatus::Pending);
        entry.set_polarity(1);
        entry.set_remote_irr(true);
        entry.set_trigger_mode(TriggerMode::Level);
        entry.set_interrupt_mask(true);
        entry.set_dest_id(0b10101010);

        // Bit repr as: destid-reserved--------------------------------flags----vector--
        let bit_repr = 0b1010101000000000000000000000000000000000000000011111001011111111;
        // where flags is [interrupt_mask(1), trigger_mode(Level=1), remote_irr(1), polarity(1),
        // delivery_status(Pending=1), dest_mode(Physical=0), delivery_mode(SMI=010)]

        assert_eq!(entry.get(0, 64), bit_repr);

        let state = IoapicState {
            base_address: 1,
            ioregsel: 2,
            ioapicid: 4,
            current_interrupt_level_bitmap: 8,
            redirect_table: [entry; 24],
        };

        let kvm_state = kvm_ioapic_state::from(&state);
        assert_eq!(kvm_state.base_address, 1);
        assert_eq!(kvm_state.ioregsel, 2);
        assert_eq!(kvm_state.id, 4);
        assert_eq!(kvm_state.irr, 8);
        assert_eq!(kvm_state.pad, 0);
        // check our entries
        for i in 0..24 {
            assert_eq!(unsafe { kvm_state.redirtbl[i].bits }, bit_repr);
        }

        // compare with a conversion back
        assert_eq!(state, IoapicState::from(&kvm_state));
    }

    #[test]
    fn lapic_state() {
        let mut state = LapicState { regs: [0; 64] };
        // Apic id register, 4 bytes each with a different bit set
        state.regs[2] = 1 | 2 << 8 | 4 << 16 | 8 << 24;

        let kvm_state = kvm_lapic_state::from(&state);

        // check little endian bytes in kvm_state
        for i in 0..4 {
            // An `as` cast reinterprets the i8 byte as u8; no unsafe
            // transmute is needed for a same-size integer conversion.
            assert_eq!(kvm_state.regs[32 + i] as u8, 2u8.pow(i as u32));
        }

        // Test converting back to a LapicState
        assert_eq!(state, LapicState::from(&kvm_state));
    }

    #[test]
    fn pit_state() {
        let channel = PitChannelState {
            count: 256,
            latched_count: 512,
            count_latched: PitRWState::LSB,
            status_latched: false,
            status: 7,
            read_state: PitRWState::MSB,
            write_state: PitRWState::Word1,
            reload_value: 8,
            rw_mode: PitRWMode::Both,
            mode: 5,
            bcd: false,
            gate: true,
            count_load_time: 1024,
        };

        let kvm_channel = kvm_pit_channel_state::from(&channel);

        // compare the various field translations
        assert_eq!(kvm_channel.count, 256);
        assert_eq!(kvm_channel.latched_count, 512);
        assert_eq!(kvm_channel.count_latched, 1);
        assert_eq!(kvm_channel.status_latched, 0);
        assert_eq!(kvm_channel.status, 7);
        assert_eq!(kvm_channel.read_state, 2);
        assert_eq!(kvm_channel.write_state, 4);
        assert_eq!(kvm_channel.write_latch, 8);
        assert_eq!(kvm_channel.rw_mode, 3);
        assert_eq!(kvm_channel.mode, 5);
        assert_eq!(kvm_channel.bcd, 0);
        assert_eq!(kvm_channel.gate, 1);
        assert_eq!(kvm_channel.count_load_time, 1024);

        // convert back and compare
        assert_eq!(channel, PitChannelState::from(&kvm_channel));

        // convert the full pitstate
        let state = PitState {
            channels: [channel, channel, channel],
            flags: 255,
        };
        let kvm_state = kvm_pit_state2::from(&state);

        assert_eq!(kvm_state.flags, 255);

        // compare a channel
        assert_eq!(channel, PitChannelState::from(&kvm_state.channels[0]));
        // convert back and compare
        assert_eq!(state, PitState::from(&kvm_state));
    }

    #[test]
    fn clock_handling() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let mut clock_data = vm.get_pvclock().unwrap();
        clock_data.clock += 1000;
        vm.set_pvclock(&clock_data).unwrap();
    }

    #[test]
    fn set_gsi_routing() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        vm.set_gsi_routing(&[]).unwrap();
        vm.set_gsi_routing(&[IrqRoute {
            gsi: 1,
            source: IrqSource::Irqchip {
                chip: IrqSourceChip::Ioapic,
                pin: 3,
            },
        }])
        .unwrap();
        vm.set_gsi_routing(&[IrqRoute {
            gsi: 1,
            source: IrqSource::Msi {
                address: 0xf000000,
                data: 0xa0,
            },
        }])
        .unwrap();
        vm.set_gsi_routing(&[
            IrqRoute {
                gsi: 1,
                source: IrqSource::Irqchip {
                    chip: IrqSourceChip::Ioapic,
                    pin: 3,
                },
            },
            IrqRoute {
                gsi: 2,
                source: IrqSource::Msi {
                    address: 0xf000000,
                    data: 0xa0,
                },
            },
        ])
        .unwrap();
    }

    #[test]
    fn set_identity_map_addr() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        vm.set_identity_map_addr(GuestAddress(0x20000)).unwrap();
    }

    #[test]
    fn mp_state() {
        let kvm = Kvm::new().unwrap();
        // Use a slice literal for consistency with the other tests.
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let state = vcpu.get_mp_state().unwrap();
        vcpu.set_mp_state(&state).unwrap();
    }

    #[test]
    fn enable_feature() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        unsafe { vcpu.enable_raw_capability(kvm_sys::KVM_CAP_HYPERV_SYNIC, &[0; 4]) }.unwrap();
    }

    #[test]
    fn from_fpu() {
        // Fpu has the largest arrays in our struct adapters. Test that they're small enough for
        // Rust to copy.
        let mut fpu: Fpu = Default::default();
        let m = fpu.xmm.len();
        let n = fpu.xmm[0].len();
        fpu.xmm[m - 1][n - 1] = 42;

        let fpu = kvm_fpu::from(&fpu);
        assert_eq!(fpu.xmm.len(), m);
        assert_eq!(fpu.xmm[0].len(), n);
        assert_eq!(fpu.xmm[m - 1][n - 1], 42);
    }

    #[test]
    fn debugregs() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let mut dregs = vcpu.get_debugregs().unwrap();
        dregs.dr7 = 13;
        vcpu.set_debugregs(&dregs).unwrap();
        let dregs2 = vcpu.get_debugregs().unwrap();
        assert_eq!(dregs.dr7, dregs2.dr7);
    }

    #[test]
    fn xcrs() {
        let kvm = Kvm::new().unwrap();
        if !kvm.check_capability(&HypervisorCap::Xcrs) {
            return;
        }

        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let mut xcrs = vcpu.get_xcrs().unwrap();
        xcrs[0].value = 1;
        vcpu.set_xcrs(&xcrs).unwrap();
        let xcrs2 = vcpu.get_xcrs().unwrap();
        assert_eq!(xcrs[0].value, xcrs2[0].value);
    }

    #[test]
    fn get_msrs() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let mut msrs = vec![
            // This one should succeed
            Register {
                id: 0x0000011e,
                ..Default::default()
            },
            // This one will fail to fetch
            Register {
                id: 0x000003f1,
                ..Default::default()
            },
        ];
        vcpu.get_msrs(&mut msrs).unwrap();
        assert_eq!(msrs.len(), 1);
    }

    #[test]
    fn set_msrs() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();

        const MSR_TSC_AUX: u32 = 0xc0000103;
        let mut msrs = vec![Register {
            id: MSR_TSC_AUX,
            value: 42,
        }];
        vcpu.set_msrs(&msrs).unwrap();

        msrs[0].value = 0;
        vcpu.get_msrs(&mut msrs).unwrap();
        assert_eq!(msrs.len(), 1);
        assert_eq!(msrs[0].id, MSR_TSC_AUX);
        assert_eq!(msrs[0].value, 42);
    }

    #[test]
    fn get_hyperv_cpuid() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&[(GuestAddress(0), 0x10000)]).unwrap();
        let vm = KvmVm::new(&kvm, gm).unwrap();
        let vcpu = vm.create_vcpu(0).unwrap();
        let cpuid = vcpu.get_hyperv_cpuid();
        // Older kernels don't support so tolerate this kind of failure.
        match cpuid {
            Ok(_) => {}
            Err(e) => {
                assert_eq!(e.errno(), EINVAL);
            }
        }
    }
}
1588