// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! A safe wrapper around the kernel's KVM interface.

mod cap;

use std::cmp::{min, Ordering};
use std::collections::{BinaryHeap, HashMap};
use std::fs::File;
use std::mem::size_of;
use std::os::raw::*;
use std::os::unix::io::{AsRawFd, FromRawFd, RawFd};
use std::ptr::copy_nonoverlapping;

use libc::sigset_t;
use libc::{open, EINVAL, ENOENT, ENOSPC, O_CLOEXEC, O_RDWR};

use kvm_sys::*;

use msg_socket::MsgOnSocket;
#[allow(unused_imports)]
use sys_util::{
    ioctl, ioctl_with_mut_ptr, ioctl_with_mut_ref, ioctl_with_ptr, ioctl_with_ref, ioctl_with_val,
    pagesize, signal, warn, Error, EventFd, GuestAddress, GuestMemory, MemoryMapping,
    MemoryMappingArena, Result,
};

pub use crate::cap::*;

fn errno_result<T>() -> Result<T> {
    Err(Error::last())
}

// Returns a `Vec<T>` with a size in bytes at least as large as `size_in_bytes`.
fn vec_with_size_in_bytes<T: Default>(size_in_bytes: usize) -> Vec<T> {
    let rounded_size = (size_in_bytes + size_of::<T>() - 1) / size_of::<T>();
    let mut v = Vec::with_capacity(rounded_size);
    for _ in 0..rounded_size {
        v.push(T::default())
    }
    v
}

// The kvm API has many structs that resemble the following `Foo` structure:
//
// ```
// #[repr(C)]
// struct Foo {
//    some_data: u32
//    entries: __IncompleteArrayField<__u32>,
// }
// ```
//
// In order to allocate such a structure, `size_of::<Foo>()` would be too small because it would not
// include any space for `entries`. To make the allocation large enough while still being aligned
// for `Foo`, a `Vec<Foo>` is created. Only the first element of `Vec<Foo>` would actually be used
// as a `Foo`. The remaining memory in the `Vec<Foo>` is for `entries`, which must be contiguous
// with `Foo`. This function is used to make the `Vec<Foo>` with enough space for `count` entries.
fn vec_with_array_field<T: Default, F>(count: usize) -> Vec<T> {
    let element_space = count * size_of::<F>();
    let vec_size_bytes = size_of::<T>() + element_space;
    vec_with_size_in_bytes(vec_size_bytes)
}
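
// For example (a sketch mirroring `get_msr_index_list` below), allocating a
// `kvm_msr_list` with room for `n` trailing `u32` indices looks like:
//
//     let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(n);
//     msr_list[0].nmsrs = n as u32;
//
// Only `msr_list[0]` is used as the header; the vector's remaining memory backs
// the flexible `indices` array.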

unsafe fn set_user_memory_region<F: AsRawFd>(
    fd: &F,
    slot: u32,
    read_only: bool,
    log_dirty_pages: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = if read_only { KVM_MEM_READONLY } else { 0 };
    if log_dirty_pages {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    let region = kvm_userspace_memory_region {
        slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(fd, KVM_SET_USER_MEMORY_REGION(), &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

/// Helper function to determine the size in bytes of a dirty log bitmap for the given memory region
/// size.
///
/// # Arguments
///
/// * `size` - Number of bytes in the memory region being queried.
pub fn dirty_log_bitmap_size(size: usize) -> usize {
    let page_size = pagesize();
    (((size + page_size - 1) / page_size) + 7) / 8
}
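
// For example, with 4096-byte pages a 16-page (65536-byte) region needs one
// dirty bit per page, so dirty_log_bitmap_size(65536) == 2 bytes (16 bits
// rounded up to whole bytes).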

/// A wrapper around opening and using `/dev/kvm`.
///
/// Useful for querying extensions and basic values from the KVM backend. A `Kvm` is required to
/// create a `Vm` object.
pub struct Kvm {
    kvm: File,
}

impl Kvm {
    /// Opens `/dev/kvm` and returns a Kvm object on success.
    pub fn new() -> Result<Kvm> {
        // Open calls are safe because we give a constant nul-terminated string and verify the
        // result.
        let ret = unsafe { open("/dev/kvm\0".as_ptr() as *const c_char, O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        // Safe because we verify that ret is valid and we own the fd.
        Ok(Kvm {
            kvm: unsafe { File::from_raw_fd(ret) },
        })
    }

    fn check_extension_int(&self, c: Cap) -> i32 {
        // Safe because we know that our file is a KVM fd and that the extension is one of the ones
        // defined by kernel.
        unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) }
    }

    /// Checks if a particular `Cap` is available.
    pub fn check_extension(&self, c: Cap) -> bool {
        self.check_extension_int(c) == 1
    }

    /// Gets the size of the mmap required to use a vcpu's `kvm_run` structure.
    pub fn get_vcpu_mmap_size(&self) -> Result<usize> {
        // Safe because we know that our file is a KVM fd and we verify the return result.
        let res = unsafe { ioctl(self, KVM_GET_VCPU_MMAP_SIZE() as c_ulong) };
        if res > 0 {
            Ok(res as usize)
        } else {
            errno_result()
        }
    }

    /// Gets the recommended maximum number of VCPUs per VM.
    pub fn get_nr_vcpus(&self) -> u32 {
        match self.check_extension_int(Cap::NrVcpus) {
            0 => 4, // according to api.txt
            x if x > 0 => x as u32,
            _ => {
                warn!("kernel returned invalid number of VCPUs");
                4
            }
        }
    }

    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn get_cpuid(&self, kind: u64) -> Result<CpuId> {
        const MAX_KVM_CPUID_ENTRIES: usize = 256;
        let mut cpuid = CpuId::new(MAX_KVM_CPUID_ENTRIES);

        let ret = unsafe {
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nent, which is set to the allocated
            // size (MAX_KVM_CPUID_ENTRIES) above.
            ioctl_with_mut_ptr(self, kind, cpuid.as_mut_ptr())
        };
        if ret < 0 {
            return errno_result();
        }

        Ok(cpuid)
    }

    /// X86 specific call to get the system supported CPUID values
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_supported_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_SUPPORTED_CPUID())
    }

    /// X86 specific call to get the system emulated CPUID values
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_emulated_cpuid(&self) -> Result<CpuId> {
        self.get_cpuid(KVM_GET_EMULATED_CPUID())
    }

    /// X86 specific call to get the list of supported MSRS
    ///
    /// See the documentation for KVM_GET_MSR_INDEX_LIST.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_msr_index_list(&self) -> Result<Vec<u32>> {
        const MAX_KVM_MSR_ENTRIES: usize = 256;

        let mut msr_list = vec_with_array_field::<kvm_msr_list, u32>(MAX_KVM_MSR_ENTRIES);
        msr_list[0].nmsrs = MAX_KVM_MSR_ENTRIES as u32;

        let ret = unsafe {
            // ioctl is unsafe. The kernel is trusted not to write beyond the bounds of the memory
            // allocated for the struct. The limit is read from nmsrs, which is set to the allocated
            // size (MAX_KVM_MSR_ENTRIES) above.
            ioctl_with_mut_ref(self, KVM_GET_MSR_INDEX_LIST(), &mut msr_list[0])
        };
        if ret < 0 {
            return errno_result();
        }

        let mut nmsrs = msr_list[0].nmsrs;

        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        let indices: &[u32] = unsafe {
            if nmsrs > MAX_KVM_MSR_ENTRIES as u32 {
                nmsrs = MAX_KVM_MSR_ENTRIES as u32;
            }
            msr_list[0].indices.as_slice(nmsrs as usize)
        };

        Ok(indices.to_vec())
    }
}

impl AsRawFd for Kvm {
    fn as_raw_fd(&self) -> RawFd {
        self.kvm.as_raw_fd()
    }
}

/// An address either in programmable I/O space or in memory mapped I/O space.
#[derive(Copy, Clone, Debug, MsgOnSocket)]
pub enum IoeventAddress {
    Pio(u64),
    Mmio(u64),
}

/// Used in `Vm::register_ioevent` to indicate a size and optionally value to match.
pub enum Datamatch {
    AnyLength,
    U8(Option<u8>),
    U16(Option<u16>),
    U32(Option<u32>),
    U64(Option<u64>),
}

/// A source of IRQs in an `IrqRoute`.
pub enum IrqSource {
    Irqchip { chip: u32, pin: u32 },
    Msi { address: u64, data: u32 },
}

/// A single route for an IRQ.
pub struct IrqRoute {
    pub gsi: u32,
    pub source: IrqSource,
}
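
// For example (a sketch; chip 0 is assumed to be the primary PIC here), a route
// delivering GSI 5 through pin 5 of the primary PIC would be built as:
//
//     let route = IrqRoute {
//         gsi: 5,
//         source: IrqSource::Irqchip { chip: 0, pin: 5 },
//     };
//
// and passed to `Vm::set_gsi_routing` along with any other routes.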

/// Interrupt controller IDs
pub enum PicId {
    Primary = 0,
    Secondary = 1,
}

/// Number of pins on the IOAPIC.
pub const NUM_IOAPIC_PINS: usize = 24;

// Used to invert the order when stored in a max-heap.
#[derive(Copy, Clone, Eq, PartialEq)]
struct MemSlot(u32);

impl Ord for MemSlot {
    fn cmp(&self, other: &MemSlot) -> Ordering {
        // Notice the order is inverted so the lowest magnitude slot has the highest priority in a
        // max-heap.
        other.0.cmp(&self.0)
    }
}

impl PartialOrd for MemSlot {
    fn partial_cmp(&self, other: &MemSlot) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
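
// With the inverted ordering above, a `BinaryHeap<MemSlot>` behaves as a
// min-heap over slot numbers: after pushing MemSlot(3) and MemSlot(1), `pop()`
// yields MemSlot(1) first, so freed slots are always reused lowest-first.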

/// A wrapper around creating and using a VM.
pub struct Vm {
    vm: File,
    guest_mem: GuestMemory,
    device_memory: HashMap<u32, MemoryMapping>,
    mmap_arenas: HashMap<u32, MemoryMappingArena>,
    mem_slot_gaps: BinaryHeap<MemSlot>,
}

impl Vm {
    /// Constructs a new `Vm` using the given `Kvm` instance.
    pub fn new(kvm: &Kvm, guest_mem: GuestMemory) -> Result<Vm> {
        // Safe because we know kvm is a real kvm fd as this module is the only one that can make
        // Kvm objects.
        let ret = unsafe { ioctl(kvm, KVM_CREATE_VM()) };
        if ret >= 0 {
            // Safe because we verify the value of ret and we are the owners of the fd.
            let vm_file = unsafe { File::from_raw_fd(ret) };
            guest_mem.with_regions(|index, guest_addr, size, host_addr, _| {
                unsafe {
                    // Safe because the guest regions are guaranteed not to overlap.
                    set_user_memory_region(
                        &vm_file,
                        index as u32,
                        false,
                        false,
                        guest_addr.offset() as u64,
                        size as u64,
                        host_addr as *mut u8,
                    )
                }
            })?;

            Ok(Vm {
                vm: vm_file,
                guest_mem,
                device_memory: HashMap::new(),
                mmap_arenas: HashMap::new(),
                mem_slot_gaps: BinaryHeap::new(),
            })
        } else {
            errno_result()
        }
    }

    // Helper method for `set_user_memory_region` that tracks available slots.
    unsafe fn set_user_memory_region(
        &mut self,
        read_only: bool,
        log_dirty_pages: bool,
        guest_addr: u64,
        memory_size: u64,
        userspace_addr: *mut u8,
    ) -> Result<u32> {
        let slot = match self.mem_slot_gaps.pop() {
            Some(gap) => gap.0,
            None => {
                (self.device_memory.len()
                    + self.guest_mem.num_regions() as usize
                    + self.mmap_arenas.len()) as u32
            }
        };

        let res = set_user_memory_region(
            &self.vm,
            slot,
            read_only,
            log_dirty_pages,
            guest_addr,
            memory_size,
            userspace_addr,
        );
        match res {
            Ok(_) => Ok(slot),
            Err(e) => {
                self.mem_slot_gaps.push(MemSlot(slot));
                Err(e)
            }
        }
    }

    // Helper method that unmaps the memory region at `slot` and returns the slot to the pool of
    // available slots.
    unsafe fn remove_user_memory_region(&mut self, slot: u32) -> Result<()> {
        set_user_memory_region(&self.vm, slot, false, false, 0, 0, std::ptr::null_mut())?;
        self.mem_slot_gaps.push(MemSlot(slot));
        Ok(())
    }

    /// Checks if a particular `Cap` is available.
    ///
    /// This is distinct from the `Kvm` version of this method because some extensions depend on
    /// the particular `Vm` instance. This method is encouraged by the kernel because it more
    /// accurately reflects the usable capabilities.
    pub fn check_extension(&self, c: Cap) -> bool {
        // Safe because we know that our file is a KVM fd and that the extension is one of the ones
        // defined by kernel.
        unsafe { ioctl_with_val(self, KVM_CHECK_EXTENSION(), c as c_ulong) == 1 }
    }

    /// Inserts the given `MemoryMapping` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the device memory mapping is returned on success. The slot can be
    /// given to `Vm::remove_device_memory` to remove the memory from the VM's address space and
    /// take back ownership of `mem`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    pub fn add_device_memory(
        &mut self,
        guest_addr: GuestAddress,
        mem: MemoryMapping,
        read_only: bool,
        log_dirty_pages: bool,
    ) -> Result<u32> {
        if guest_addr < self.guest_mem.end_addr() {
            return Err(Error::new(ENOSPC));
        }

        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let slot = unsafe {
            self.set_user_memory_region(
                read_only,
                log_dirty_pages,
                guest_addr.offset() as u64,
                mem.size() as u64,
                mem.as_ptr(),
            )?
        };
        self.device_memory.insert(slot, mem);

        Ok(slot)
    }

    /// Removes device memory that was previously added at the given slot.
    ///
    /// Ownership of the host memory mapping associated with the given slot is returned on success.
    pub fn remove_device_memory(&mut self, slot: u32) -> Result<MemoryMapping> {
        if self.device_memory.contains_key(&slot) {
            // Safe because the slot is checked against the list of device memory slots.
            unsafe {
                self.remove_user_memory_region(slot)?;
            }
            // Safe to unwrap since map is checked to contain key
            Ok(self.device_memory.remove(&slot).unwrap())
        } else {
            Err(Error::new(ENOENT))
        }
    }

    /// Inserts the given `MemoryMappingArena` into the VM's address space at `guest_addr`.
    ///
    /// The slot that was assigned the device memory mapping is returned on success. The slot can be
    /// given to `Vm::remove_mmap_arena` to remove the memory from the VM's address space and
    /// take back ownership of `mmap_arena`.
    ///
    /// Note that memory inserted into the VM's address space must not overlap with any other memory
    /// slot's region.
    ///
    /// If `read_only` is true, the guest will be able to read the memory as normal, but attempts to
    /// write will trigger a mmio VM exit, leaving the memory untouched.
    ///
    /// If `log_dirty_pages` is true, the slot number can be used to retrieve the pages written to
    /// by the guest with `get_dirty_log`.
    pub fn add_mmap_arena(
        &mut self,
        guest_addr: GuestAddress,
        mmap_arena: MemoryMappingArena,
        read_only: bool,
        log_dirty_pages: bool,
    ) -> Result<u32> {
        if guest_addr < self.guest_mem.end_addr() {
            return Err(Error::new(ENOSPC));
        }

        // Safe because we check that the given guest address is valid and has no overlaps. We also
        // know that the pointer and size are correct because the MemoryMapping interface ensures
        // this. We take ownership of the memory mapping so that it won't be unmapped until the slot
        // is removed.
        let slot = unsafe {
            self.set_user_memory_region(
                read_only,
                log_dirty_pages,
                guest_addr.offset() as u64,
                mmap_arena.size() as u64,
                mmap_arena.as_ptr(),
            )?
        };
        self.mmap_arenas.insert(slot, mmap_arena);

        Ok(slot)
    }

    /// Removes a memory mapping arena that was previously added at the given slot.
    ///
    /// Ownership of the host memory mapping associated with the given slot is returned on success.
    pub fn remove_mmap_arena(&mut self, slot: u32) -> Result<MemoryMappingArena> {
        if self.mmap_arenas.contains_key(&slot) {
            // Safe because the slot is checked against the list of mmap arena slots.
            unsafe {
                self.remove_user_memory_region(slot)?;
            }
            // Safe to unwrap since map is checked to contain key
            Ok(self.mmap_arenas.remove(&slot).unwrap())
        } else {
            Err(Error::new(ENOENT))
        }
    }

    /// Gets a mutable reference to the memory mapping arena added at the given slot.
    pub fn get_mmap_arena(&mut self, slot: u32) -> Option<&mut MemoryMappingArena> {
        self.mmap_arenas.get_mut(&slot)
    }

    /// Gets the bitmap of dirty pages since the last call to `get_dirty_log` for the memory at
    /// `slot`.
    ///
    /// The size of `dirty_log` must be at least as many bits as there are pages in the memory
    /// region `slot` represents. For example, if the size of `slot` is 16 pages, `dirty_log` must
    /// be 2 bytes or greater.
    pub fn get_dirty_log(&self, slot: u32, dirty_log: &mut [u8]) -> Result<()> {
        match self.device_memory.get(&slot) {
            Some(mmap) => {
                // Ensures that there are as many bytes in dirty_log as there are pages in the mmap.
                if dirty_log_bitmap_size(mmap.size()) > dirty_log.len() {
                    return Err(Error::new(EINVAL));
                }
                let mut dirty_log_kvm = kvm_dirty_log {
                    slot,
                    ..Default::default()
                };
                dirty_log_kvm.__bindgen_anon_1.dirty_bitmap = dirty_log.as_ptr() as *mut c_void;
                // Safe because the `dirty_bitmap` pointer assigned above is guaranteed to be valid
                // (because it's from a slice) and we checked that it will be large enough to hold
                // the entire log.
                let ret = unsafe { ioctl_with_ref(self, KVM_GET_DIRTY_LOG(), &dirty_log_kvm) };
                if ret == 0 {
                    Ok(())
                } else {
                    errno_result()
                }
            }
            _ => Err(Error::new(ENOENT)),
        }
    }

    /// Gets a reference to the guest memory owned by this VM.
    ///
    /// Note that `GuestMemory` does not include any device memory that may have been added after
    /// this VM was constructed.
    pub fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    /// Sets the address of the three-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_TSS_ADDR ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_tss_addr(&self, addr: GuestAddress) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl_with_val(self, KVM_SET_TSS_ADDR(), addr.offset() as u64) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the address of a one-page region in the VM's address space.
    ///
    /// See the documentation on the KVM_SET_IDENTITY_MAP_ADDR ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_identity_map_addr(&self, addr: GuestAddress) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret =
            unsafe { ioctl_with_ref(self, KVM_SET_IDENTITY_MAP_ADDR(), &(addr.offset() as u64)) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the current timestamp of kvmclock as seen by the current guest.
    ///
    /// See the documentation on the KVM_GET_CLOCK ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_clock(&self) -> Result<kvm_clock_data> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut clock_data = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_CLOCK(), &mut clock_data) };
        if ret == 0 {
            Ok(clock_data)
        } else {
            errno_result()
        }
    }

    /// Sets the current timestamp of kvmclock to the specified value.
    ///
    /// See the documentation on the KVM_SET_CLOCK ioctl.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_clock(&self, clock_data: &kvm_clock_data) -> Result<()> {
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_CLOCK(), clock_data) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates an in-kernel interrupt controller.
    ///
    /// See the documentation on the KVM_CREATE_IRQCHIP ioctl.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn create_irq_chip(&self) -> Result<()> {
        // Safe because we know that our file is a VM fd and we verify the return result.
        let ret = unsafe { ioctl(self, KVM_CREATE_IRQCHIP()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the given interrupt controller by issuing the KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_pic_state(&self, id: PicId) -> Result<kvm_pic_state> {
        let mut irqchip_state = kvm_irqchip::default();
        irqchip_state.chip_id = id as u32;
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write the
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // PIC (primary or secondary) and not IOAPIC.
                irqchip_state.chip.pic
            })
        } else {
            errno_result()
        }
    }

    /// Sets the state of the given interrupt controller by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_pic_state(&self, id: PicId, state: &kvm_pic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip::default();
        irqchip_state.chip_id = id as u32;
        irqchip_state.chip.pic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the IOAPIC by issuing the KVM_GET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_ioapic_state(&self) -> Result<kvm_ioapic_state> {
        let mut irqchip_state = kvm_irqchip::default();
        irqchip_state.chip_id = 2;
        let ret = unsafe {
            // Safe because we know our file is a VM fd, we know the kernel will only write the
            // correct amount of memory to our pointer, and we verify the return result.
            ioctl_with_mut_ref(self, KVM_GET_IRQCHIP(), &mut irqchip_state)
        };
        if ret == 0 {
            Ok(unsafe {
                // Safe as we know that we are retrieving data related to the
                // IOAPIC and not PIC.
                irqchip_state.chip.ioapic
            })
        } else {
            errno_result()
        }
    }

    /// Sets the state of the IOAPIC by issuing the KVM_SET_IRQCHIP ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_ioapic_state(&self, state: &kvm_ioapic_state) -> Result<()> {
        let mut irqchip_state = kvm_irqchip::default();
        irqchip_state.chip_id = 2;
        irqchip_state.chip.ioapic = *state;
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_IRQCHIP(), &irqchip_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the level on the given irq to 1 if `active` is true, and 0 otherwise.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn set_irq_line(&self, irq: u32, active: bool) -> Result<()> {
        let mut irq_level = kvm_irq_level::default();
        irq_level.__bindgen_anon_1.irq = irq;
        irq_level.level = if active { 1 } else { 0 };

        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQ_LINE(), &irq_level) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Creates a PIT as per the KVM_CREATE_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_irq_chip`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn create_pit(&self) -> Result<()> {
        let pit_config = kvm_pit_config::default();
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_CREATE_PIT2(), &pit_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Retrieves the state of the PIT by issuing the KVM_GET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_pit_state(&self) -> Result<kvm_pit_state2> {
        // Safe because we know that our file is a VM fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut pit_state = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_PIT2(), &mut pit_state) };
        if ret == 0 {
            Ok(pit_state)
        } else {
            errno_result()
        }
    }

    /// Sets the state of the PIT by issuing the KVM_SET_PIT2 ioctl.
    ///
    /// Note that this call can only succeed after a call to `Vm::create_pit`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_pit_state(&self, pit_state: &kvm_pit_state2) -> Result<()> {
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_PIT2(), pit_state) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Registers an event to be signaled whenever a certain address is written to.
    ///
    /// The `datamatch` parameter can be used to limit signaling `evt` to only the cases where the
    /// value being written is equal to `datamatch`. Note that the size of `datamatch` is important
    /// and must match the expected size of the guest's write.
    ///
    /// In all cases where `evt` is signaled, the ordinary vmexit to userspace that would be
    /// triggered is prevented.
    pub fn register_ioevent(
        &self,
        evt: &EventFd,
        addr: IoeventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        self.ioeventfd(evt, addr, datamatch, false)
    }
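
    // For example (a sketch; the address and value are arbitrary), to signal
    // `evt` only when the guest writes the 32-bit value 1 to MMIO address
    // 0x1000_0000:
    //
    //     vm.register_ioevent(&evt, IoeventAddress::Mmio(0x1000_0000),
    //                         Datamatch::U32(Some(1)))?;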

    /// Unregisters an event previously registered with `register_ioevent`.
    ///
    /// The `evt`, `addr`, and `datamatch` set must be the same as the ones passed into
    /// `register_ioevent`.
    pub fn unregister_ioevent(
        &self,
        evt: &EventFd,
        addr: IoeventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
        self.ioeventfd(evt, addr, datamatch, true)
    }

    fn ioeventfd(
        &self,
        evt: &EventFd,
        addr: IoeventAddress,
        datamatch: Datamatch,
        deassign: bool,
    ) -> Result<()> {
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u as u64, 8),
                None => (false, 0, 8),
            },
        };
        let mut flags = 0;
        if deassign {
            flags |= 1 << kvm_ioeventfd_flag_nr_deassign;
        }
        if do_datamatch {
            flags |= 1 << kvm_ioeventfd_flag_nr_datamatch
        }
        if let IoeventAddress::Pio(_) = addr {
            flags |= 1 << kvm_ioeventfd_flag_nr_pio;
        }
        let ioeventfd = kvm_ioeventfd {
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: match addr {
                IoeventAddress::Pio(p) => p as u64,
                IoeventAddress::Mmio(m) => m,
            },
            fd: evt.as_raw_fd(),
            flags,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IOEVENTFD(), &ioeventfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Registers an event that will, when signalled, trigger the `gsi` irq.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn register_irqfd(&self, evt: &EventFd, gsi: u32) -> Result<()> {
        let irqfd = kvm_irqfd {
            fd: evt.as_raw_fd() as u32,
            gsi,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Registers an event that will, when signalled, trigger the `gsi` irq, and `resample_evt` will
    /// get triggered when the irqchip is resampled.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn register_irqfd_resample(
        &self,
        evt: &EventFd,
        resample_evt: &EventFd,
        gsi: u32,
    ) -> Result<()> {
        let irqfd = kvm_irqfd {
            flags: KVM_IRQFD_FLAG_RESAMPLE,
            fd: evt.as_raw_fd() as u32,
            resamplefd: resample_evt.as_raw_fd() as u32,
            gsi,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Unregisters an event that was previously registered with
    /// `register_irqfd`/`register_irqfd_resample`.
    ///
    /// The `evt` and `gsi` pair must be the same as the ones passed into
    /// `register_irqfd`/`register_irqfd_resample`.
    #[cfg(any(
        target_arch = "x86",
        target_arch = "x86_64",
        target_arch = "arm",
        target_arch = "aarch64"
    ))]
    pub fn unregister_irqfd(&self, evt: &EventFd, gsi: u32) -> Result<()> {
        let irqfd = kvm_irqfd {
            fd: evt.as_raw_fd() as u32,
            gsi,
            flags: KVM_IRQFD_FLAG_DEASSIGN,
            ..Default::default()
        };
        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_IRQFD(), &irqfd) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Sets the GSI routing table, replacing any table set with previous calls to
    /// `set_gsi_routing`.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_gsi_routing(&self, routes: &[IrqRoute]) -> Result<()> {
        let mut irq_routing =
            vec_with_array_field::<kvm_irq_routing, kvm_irq_routing_entry>(routes.len());
        irq_routing[0].nr = routes.len() as u32;

        // Safe because we ensured there is enough space in irq_routing to hold the number of
        // route entries.
        let irq_routes = unsafe { irq_routing[0].entries.as_mut_slice(routes.len()) };
        for (route, irq_route) in routes.iter().zip(irq_routes.iter_mut()) {
            irq_route.gsi = route.gsi;
            match route.source {
                IrqSource::Irqchip { chip, pin } => {
                    irq_route.type_ = KVM_IRQ_ROUTING_IRQCHIP;
                    irq_route.u.irqchip = kvm_irq_routing_irqchip { irqchip: chip, pin }
                }
                IrqSource::Msi { address, data } => {
                    irq_route.type_ = KVM_IRQ_ROUTING_MSI;
                    irq_route.u.msi = kvm_irq_routing_msi {
                        address_lo: address as u32,
                        address_hi: (address >> 32) as u32,
                        data,
                        ..Default::default()
                    }
                }
            }
        }

        // Safe because we know that our file is a VM fd, we know the kernel will only read the
        // table we allocated and initialized above, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_GSI_ROUTING(), &irq_routing[0]) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// Does KVM_CREATE_DEVICE for a generic device.
    pub fn create_device(&self, device: &mut kvm_create_device) -> Result<()> {
        // Safe because we know that our file is a VM fd, the kernel only writes the created
        // device's fd back into the struct we pass, and we verify the return result.
        let ret = unsafe { sys_util::ioctl_with_ref(self, KVM_CREATE_DEVICE(), device) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    /// This queries the kernel for the preferred target CPU type.
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    pub fn arm_preferred_target(&self, kvi: &mut kvm_vcpu_init) -> Result<()> {
        // The ioctl is safe because we allocated the struct and we know the
        // kernel will write exactly the size of the struct.
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_ARM_PREFERRED_TARGET(), kvi) };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Enables the specified capability.
    /// See the documentation for KVM_ENABLE_CAP.
    pub fn kvm_enable_cap(&self, cap: &kvm_enable_cap) -> Result<()> {
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ENABLE_CAP(), cap) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }

    /// (x86-only): Enable support for split-irqchip.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn enable_split_irqchip(&self) -> Result<()> {
        let mut cap: kvm_enable_cap = Default::default();
        cap.cap = KVM_CAP_SPLIT_IRQCHIP;
        cap.args[0] = NUM_IOAPIC_PINS as u64;
        self.kvm_enable_cap(&cap)
    }

    /// Requests that the kernel inject the specified MSI message.
    /// Returns Ok(true) on delivery, Ok(false) if the guest blocked delivery, or an error.
    /// See the kernel documentation for KVM_SIGNAL_MSI.
    pub fn signal_msi(&self, msi: &kvm_msi) -> Result<bool> {
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SIGNAL_MSI(), msi) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(ret > 0)
        }
    }
}

impl AsRawFd for Vm {
    fn as_raw_fd(&self) -> RawFd {
        self.vm.as_raw_fd()
    }
}

/// A reason why a VCPU exited. One of these is returned every time `Vcpu::run` is called.
#[derive(Debug)]
pub enum VcpuExit {
    /// An out port instruction was run on the given port with the given data.
    IoOut {
        port: u16,
        size: usize,
        data: [u8; 8],
    },
    /// An in port instruction was run on the given port.
    ///
    /// The data that the instruction receives should be set with `set_data` before `Vcpu::run` is
    /// called again.
    IoIn {
        port: u16,
        size: usize,
    },
    /// A read instruction was run against the given MMIO address.
    ///
    /// The data that the instruction receives should be set with `set_data` before `Vcpu::run` is
    /// called again.
    MmioRead {
        address: u64,
        size: usize,
    },
    /// A write instruction was run against the given MMIO address with the given data.
    MmioWrite {
        address: u64,
        size: usize,
        data: [u8; 8],
    },
    Unknown,
    Exception,
    Hypercall,
    Debug,
    Hlt,
    IrqWindowOpen,
    Shutdown,
    FailEntry,
    Intr,
    SetTpr,
    TprAccess,
    S390Sieic,
    S390Reset,
    Dcr,
    Nmi,
    InternalError,
    Osi,
    PaprHcall,
    S390Ucontrol,
    Watchdog,
    S390Tsch,
    Epr,
    /// The cpu triggered a system level event which is specified by the type field.
    /// The first field is the event type and the second field is flags.
    /// The possible event types are shutdown, reset, or crash. So far there
    /// are not any flags defined.
    SystemEvent(u32 /* event_type */, u64 /* flags */),
}
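
// A minimal dispatch loop over these exits might look like the following
// (a sketch; device wiring and error handling are omitted):
//
//     loop {
//         match vcpu.run()? {
//             VcpuExit::Hlt => break,
//             VcpuExit::IoIn { port, size } => {
//                 // Read `size` bytes from the device at `port`, then reply
//                 // with vcpu.set_data(&bytes)?.
//             }
//             VcpuExit::MmioWrite { address, size, data } => {
//                 // Forward data[..size] to the device mapped at `address`.
//             }
//             _ => {}
//         }
//     }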

/// A wrapper around creating and using a VCPU.
pub struct Vcpu {
    vcpu: File,
    run_mmap: MemoryMapping,
    guest_mem: GuestMemory,
}

impl Vcpu {
    /// Constructs a new VCPU for `vm`.
    ///
    /// The `id` argument is the CPU number between [0, max vcpus).
    pub fn new(id: c_ulong, kvm: &Kvm, vm: &Vm) -> Result<Vcpu> {
        let run_mmap_size = kvm.get_vcpu_mmap_size()?;

        // Safe because we know that vm is a VM fd and we verify the return result.
        let vcpu_fd = unsafe { ioctl_with_val(vm, KVM_CREATE_VCPU(), id) };
        if vcpu_fd < 0 {
            return errno_result();
        }

        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { File::from_raw_fd(vcpu_fd) };

        let run_mmap =
            MemoryMapping::from_fd(&vcpu, run_mmap_size).map_err(|_| Error::new(ENOSPC))?;

        let guest_mem = vm.guest_mem.clone();

        Ok(Vcpu {
            vcpu,
            run_mmap,
            guest_mem,
        })
    }
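
    // Typical setup is (a sketch, assuming `mem` is a `GuestMemory` already
    // populated with the guest's RAM):
    //
    //     let kvm = Kvm::new()?;
    //     let vm = Vm::new(&kvm, mem)?;
    //     let vcpu = Vcpu::new(0, &kvm, &vm)?;
    //
    // after which registers can be initialized and `run` called in a loop.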

    /// Gets a reference to the guest memory owned by this VCPU's VM.
    ///
    /// Note that `GuestMemory` does not include any device memory that may have been added after
    /// this VM was constructed.
    pub fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    /// Sets the data received by an mmio or ioport read/in instruction.
    ///
    /// This function should be called after `Vcpu::run` returns a `VcpuExit::IoIn` or
    /// `VcpuExit::MmioRead`.
    #[allow(clippy::cast_ptr_alignment)]
    pub fn set_data(&self, data: &[u8]) -> Result<()> {
        // Safe because we know we mapped enough memory to hold the kvm_run struct because the
        // kernel told us how large it was. The pointer is page aligned so casting to a different
        // type is well defined, hence the clippy allow attribute.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut kvm_run) };
        match run.exit_reason {
            KVM_EXIT_IO => {
                let run_start = run as *mut kvm_run as *mut u8;
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let io = unsafe { run.__bindgen_anon_1.io };
                if io.direction as u32 != KVM_EXIT_IO_IN {
                    return Err(Error::new(EINVAL));
                }
                let data_size = (io.count as usize) * (io.size as usize);
                if data_size != data.len() {
                    return Err(Error::new(EINVAL));
                }
                // The data_offset is defined by the kernel to be some number of bytes into the
                // kvm_run structure, which we have fully mmap'd.
                unsafe {
                    let data_ptr = run_start.offset(io.data_offset as isize);
                    copy_nonoverlapping(data.as_ptr(), data_ptr, data_size);
                }
                Ok(())
            }
            KVM_EXIT_MMIO => {
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
                if mmio.is_write != 0 {
                    return Err(Error::new(EINVAL));
                }
                let len = mmio.len as usize;
                if len != data.len() {
                    return Err(Error::new(EINVAL));
                }
                mmio.data[..len].copy_from_slice(data);
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }

    /// Runs the VCPU until it exits, returning the reason.
    ///
    /// Note that the state of the VCPU and associated VM must be set up first for this to do
    /// anything useful.
    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    pub fn run(&self) -> Result<VcpuExit> {
        // Safe because we know that our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl(self, KVM_RUN()) };
        if ret == 0 {
            // Safe because we know we mapped enough memory to hold the kvm_run struct because the
            // kernel told us how large it was.
            let run = unsafe { &*(self.run_mmap.as_ptr() as *const kvm_run) };
            match run.exit_reason {
                KVM_EXIT_IO => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let io = unsafe { run.__bindgen_anon_1.io };
                    let port = io.port;
                    let size = (io.count as usize) * (io.size as usize);
                    match io.direction as u32 {
                        KVM_EXIT_IO_IN => Ok(VcpuExit::IoIn { port, size }),
                        KVM_EXIT_IO_OUT => {
                            let mut data = [0; 8];
                            let run_start = run as *const kvm_run as *const u8;
                            // The data_offset is defined by the kernel to be some number of bytes
                            // into the kvm_run structure, which we have fully mmap'd.
                            unsafe {
                                let data_ptr = run_start.offset(io.data_offset as isize);
                                copy_nonoverlapping(
                                    data_ptr,
                                    data.as_mut_ptr(),
                                    min(size, data.len()),
                                );
                            }
                            Ok(VcpuExit::IoOut { port, size, data })
                        }
                        _ => Err(Error::new(EINVAL)),
                    }
                }
                KVM_EXIT_MMIO => {
                    // Safe because the exit_reason (which comes from the kernel) told us which
                    // union field to use.
                    let mmio = unsafe { &run.__bindgen_anon_1.mmio };
                    let address = mmio.phys_addr;
                    let size = min(mmio.len as usize, mmio.data.len());
                    if mmio.is_write != 0 {
                        Ok(VcpuExit::MmioWrite {
                            address,
                            size,
                            data: mmio.data,
                        })
                    } else {
                        Ok(VcpuExit::MmioRead { address, size })
                    }
                }
                KVM_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
                KVM_EXIT_EXCEPTION => Ok(VcpuExit::Exception),
                KVM_EXIT_HYPERCALL => Ok(VcpuExit::Hypercall),
                KVM_EXIT_DEBUG => Ok(VcpuExit::Debug),
                KVM_EXIT_HLT => Ok(VcpuExit::Hlt),
                KVM_EXIT_IRQ_WINDOW_OPEN => Ok(VcpuExit::IrqWindowOpen),
                KVM_EXIT_SHUTDOWN => Ok(VcpuExit::Shutdown),
                KVM_EXIT_FAIL_ENTRY => Ok(VcpuExit::FailEntry),
                KVM_EXIT_INTR => Ok(VcpuExit::Intr),
                KVM_EXIT_SET_TPR => Ok(VcpuExit::SetTpr),
                KVM_EXIT_TPR_ACCESS => Ok(VcpuExit::TprAccess),
                KVM_EXIT_S390_SIEIC => Ok(VcpuExit::S390Sieic),
                KVM_EXIT_S390_RESET => Ok(VcpuExit::S390Reset),
                KVM_EXIT_DCR => Ok(VcpuExit::Dcr),
                KVM_EXIT_NMI => Ok(VcpuExit::Nmi),
                KVM_EXIT_INTERNAL_ERROR => Ok(VcpuExit::InternalError),
                KVM_EXIT_OSI => Ok(VcpuExit::Osi),
                KVM_EXIT_PAPR_HCALL => Ok(VcpuExit::PaprHcall),
                KVM_EXIT_S390_UCONTROL => Ok(VcpuExit::S390Ucontrol),
                KVM_EXIT_WATCHDOG => Ok(VcpuExit::Watchdog),
                KVM_EXIT_S390_TSCH => Ok(VcpuExit::S390Tsch),
                KVM_EXIT_EPR => Ok(VcpuExit::Epr),
                KVM_EXIT_SYSTEM_EVENT => {
                    // Safe because we know the exit reason told us this union
                    // field is valid
                    let event_type = unsafe { run.__bindgen_anon_1.system_event.type_ };
                    let event_flags = unsafe { run.__bindgen_anon_1.system_event.flags };
                    Ok(VcpuExit::SystemEvent(event_type, event_flags))
                }
                r => panic!("unknown kvm exit reason: {}", r),
            }
        } else {
            errno_result()
        }
    }

    /// Gets the VCPU registers.
    #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
    pub fn get_regs(&self) -> Result<kvm_regs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_REGS(), &mut regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(regs)
    }

    /// Sets the VCPU registers.
    #[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
    pub fn set_regs(&self, regs: &kvm_regs) -> Result<()> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_REGS(), regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Gets the VCPU special registers.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_sregs(&self) -> Result<kvm_sregs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_SREGS(), &mut regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(regs)
    }

    /// Sets the VCPU special registers.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_sregs(&self, sregs: &kvm_sregs) -> Result<()> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only read the
        // correct amount of memory from our pointer, and we verify the return result.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_SREGS(), sregs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Gets the VCPU FPU registers.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_fpu(&self) -> Result<kvm_fpu> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_FPU(), &mut regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(regs)
    }

    /// X86 specific call to setup the FPU
    ///
    /// See the documentation for KVM_SET_FPU.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_fpu(&self, fpu: &kvm_fpu) -> Result<()> {
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_fpu struct.
            ioctl_with_ref(self, KVM_SET_FPU(), fpu)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Gets the VCPU debug registers.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_debugregs(&self) -> Result<kvm_debugregs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_DEBUGREGS(), &mut regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(regs)
    }

    /// Sets the VCPU debug registers
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_debugregs(&self, dregs: &kvm_debugregs) -> Result<()> {
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_debugregs struct.
            ioctl_with_ref(self, KVM_SET_DEBUGREGS(), dregs)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Gets the VCPU extended control registers
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_xcrs(&self) -> Result<kvm_xcrs> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write the
        // correct amount of memory to our pointer, and we verify the return result.
        let mut regs = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_XCRS(), &mut regs) };
        if ret != 0 {
            return errno_result();
        }
        Ok(regs)
    }

    /// Sets the VCPU extended control registers
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_xcrs(&self, xcrs: &kvm_xcrs) -> Result<()> {
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_xcrs struct.
            ioctl_with_ref(self, KVM_SET_XCRS(), xcrs)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to get the MSRS
    ///
    /// See the documentation for KVM_GET_MSRS.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_msrs(&self, msr_entries: &mut Vec<kvm_msr_entry>) -> Result<()> {
        let mut msrs = vec_with_array_field::<kvm_msrs, kvm_msr_entry>(msr_entries.len());
        unsafe {
            // Mapping the unsized array to a slice is unsafe because the length isn't known.
            // Providing the length used to create the struct guarantees the entire slice is valid.
            let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(msr_entries.len());
            entries.copy_from_slice(&msr_entries);
        }
        msrs[0].nmsrs = msr_entries.len() as u32;
        let ret = unsafe {
            // Here we trust the kernel not to read or write past the end of the kvm_msrs struct.
            ioctl_with_ref(self, KVM_GET_MSRS(), &msrs[0])
        };
        if ret < 0 {
            // KVM_GET_MSRS actually returns the number of msr entries read.
            return errno_result();
        }
        unsafe {
            let count = ret as usize;
            assert!(count <= msr_entries.len());
            let entries: &mut [kvm_msr_entry] = msrs[0].entries.as_mut_slice(count);
            msr_entries.truncate(count);
            msr_entries.copy_from_slice(&entries);
        }
        Ok(())
    }

    /// X86 specific call to setup the MSRS
    ///
    /// See the documentation for KVM_SET_MSRS.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_msrs(&self, msrs: &kvm_msrs) -> Result<()> {
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_msrs struct.
            ioctl_with_ref(self, KVM_SET_MSRS(), msrs)
        };
        if ret < 0 {
            // KVM_SET_MSRS actually returns the number of msr entries written.
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to setup the CPUID registers
    ///
    /// See the documentation for KVM_SET_CPUID2.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_cpuid2(&self, cpuid: &CpuId) -> Result<()> {
        let ret = unsafe {
            // Here we trust the kernel not to read past the end of the kvm_cpuid2 struct.
            ioctl_with_ptr(self, KVM_SET_CPUID2(), cpuid.as_ptr())
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// X86 specific call to get the state of the "Local Advanced Programmable Interrupt Controller".
    ///
    /// See the documentation for KVM_GET_LAPIC.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_lapic(&self) -> Result<kvm_lapic_state> {
        let mut klapic: kvm_lapic_state = Default::default();

        let ret = unsafe {
            // The ioctl is unsafe unless you trust the kernel not to write past the end of the
            // local_apic struct.
            ioctl_with_mut_ref(self, KVM_GET_LAPIC(), &mut klapic)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(klapic)
    }

    /// X86 specific call to set the state of the "Local Advanced Programmable Interrupt Controller".
    ///
    /// See the documentation for KVM_SET_LAPIC.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_lapic(&self, klapic: &kvm_lapic_state) -> Result<()> {
        let ret = unsafe {
            // The ioctl is safe because the kernel will only read from the klapic struct.
            ioctl_with_ref(self, KVM_SET_LAPIC(), klapic)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }
1524
    /// Gets the vcpu's current "multiprocessing state".
    ///
    /// See the documentation for KVM_GET_MP_STATE. This call can only succeed after
    /// a call to `Vm::create_irq_chip`.
    ///
    /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
    /// to run crosvm on s390.
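    ///
    /// # Example
    ///
    /// A minimal round trip (requires access to /dev/kvm), mirroring the
    /// `mp_state` test in this module:
    ///
    /// ```ignore
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// vm.create_irq_chip().unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// let state = vcpu.get_mp_state().unwrap();
    /// vcpu.set_mp_state(&state).unwrap();
    /// ```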
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_mp_state(&self) -> Result<kvm_mp_state> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let mut state: kvm_mp_state = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_MP_STATE(), &mut state) };
        if ret < 0 {
            return errno_result();
        }
        Ok(state)
    }

    /// Sets the vcpu's current "multiprocessing state".
    ///
    /// See the documentation for KVM_SET_MP_STATE. This call can only succeed after
    /// a call to `Vm::create_irq_chip`.
    ///
    /// Note that KVM defines the call for both x86 and s390 but we do not expect anyone
    /// to run crosvm on s390.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_mp_state(&self, state: &kvm_mp_state) -> Result<()> {
        let ret = unsafe {
            // The ioctl is safe because the kernel will only read from the kvm_mp_state struct.
            ioctl_with_ref(self, KVM_SET_MP_STATE(), state)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Gets the vcpu's currently pending exceptions, interrupts, NMIs, etc.
    ///
    /// See the documentation for KVM_GET_VCPU_EVENTS.
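    ///
    /// # Example
    ///
    /// A minimal round-trip sketch (x86 only; requires access to /dev/kvm):
    ///
    /// ```ignore
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// let events = vcpu.get_vcpu_events().unwrap();
    /// vcpu.set_vcpu_events(&events).unwrap();
    /// ```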
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn get_vcpu_events(&self) -> Result<kvm_vcpu_events> {
        // Safe because we know that our file is a VCPU fd, we know the kernel will only write
        // the correct amount of memory to our pointer, and we verify the return result.
        let mut events: kvm_vcpu_events = unsafe { std::mem::zeroed() };
        let ret = unsafe { ioctl_with_mut_ref(self, KVM_GET_VCPU_EVENTS(), &mut events) };
        if ret < 0 {
            return errno_result();
        }
        Ok(events)
    }

    /// Sets the vcpu's currently pending exceptions, interrupts, NMIs, etc.
    ///
    /// See the documentation for KVM_SET_VCPU_EVENTS.
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn set_vcpu_events(&self, events: &kvm_vcpu_events) -> Result<()> {
        let ret = unsafe {
            // The ioctl is safe because the kernel will only read from the
            // kvm_vcpu_events struct.
            ioctl_with_ref(self, KVM_SET_VCPU_EVENTS(), events)
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Signals to the host kernel that this VCPU is about to be paused.
    ///
    /// See the documentation for KVM_KVMCLOCK_CTRL.
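    ///
    /// # Example
    ///
    /// A minimal sketch (requires access to /dev/kvm and host support for
    /// KVM_KVMCLOCK_CTRL); call this from the vcpu thread just before it pauses:
    ///
    /// ```no_run
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// vcpu.kvmclock_ctrl().unwrap();
    /// ```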
    pub fn kvmclock_ctrl(&self) -> Result<()> {
        let ret = unsafe {
            // The ioctl is safe because it does not read or write memory in this process.
            ioctl(self, KVM_KVMCLOCK_CTRL())
        };

        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Specifies the set of signals that are blocked during execution of KVM_RUN.
    /// Signals that are not blocked will cause KVM_RUN to return with -EINTR.
    ///
    /// See the documentation for KVM_SET_SIGNAL_MASK.
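    ///
    /// # Example
    ///
    /// A minimal sketch, mirroring the `set_signal_mask` test in this module:
    ///
    /// ```no_run
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// // Block the first real-time signal while the vcpu runs.
    /// vcpu.set_signal_mask(&[sys_util::SIGRTMIN() + 0]).unwrap();
    /// ```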
    pub fn set_signal_mask(&self, signals: &[c_int]) -> Result<()> {
        let sigset = signal::create_sigset(signals)?;

        let mut kvm_sigmask = vec_with_array_field::<kvm_signal_mask, sigset_t>(1);
        // The Rust definition of sigset_t takes 128 bytes, but the kernel only
        // expects an 8-byte structure, so we can't write
        // kvm_sigmask.len = size_of::<sigset_t>() as u32;
        kvm_sigmask[0].len = 8;
        // Ensure the length is not too big: this underflows at compile time if
        // sigset_t is smaller than the 8 bytes copied below.
        const _ASSERT: usize = size_of::<sigset_t>() - 8;

        // Safe as we allocated exactly the needed space.
        unsafe {
            copy_nonoverlapping(
                &sigset as *const sigset_t as *const u8,
                kvm_sigmask[0].sigset.as_mut_ptr(),
                8,
            );
        }

        let ret = unsafe {
            // The ioctl is safe because the kernel will only read from the
            // kvm_signal_mask structure.
            ioctl_with_ref(self, KVM_SET_SIGNAL_MASK(), &kvm_sigmask[0])
        };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Sets the value of one register on this VCPU. The id of the register is
    /// encoded as specified in the kernel documentation for KVM_SET_ONE_REG.
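    ///
    /// # Example
    ///
    /// A minimal sketch (ARM only; requires access to /dev/kvm). The register
    /// id below is a placeholder; a real id must be encoded per the
    /// KVM_SET_ONE_REG documentation:
    ///
    /// ```ignore
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// let reg_id: u64 = 0; // Placeholder register id.
    /// vcpu.set_one_reg(reg_id, 0).unwrap();
    /// ```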
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    pub fn set_one_reg(&self, reg_id: u64, data: u64) -> Result<()> {
        let data_ref = &data as *const u64;
        let onereg = kvm_one_reg {
            id: reg_id,
            addr: data_ref as u64,
        };
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_SET_ONE_REG(), &onereg) };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// This initializes an ARM VCPU to the specified type with the specified features
    /// and resets the values of all of its registers to defaults.
    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
    pub fn arm_vcpu_init(&self, kvi: &kvm_vcpu_init) -> Result<()> {
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_ARM_VCPU_INIT(), kvi) };
        if ret < 0 {
            return errno_result();
        }
        Ok(())
    }

    /// Use the KVM_INTERRUPT ioctl to inject the specified interrupt vector.
    ///
    /// While this ioctl exists on PPC and MIPS as well as x86, the semantics are different and
    /// ChromeOS doesn't support PPC or MIPS.
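    ///
    /// # Example
    ///
    /// A minimal sketch (x86 only; requires access to /dev/kvm). KVM_INTERRUPT
    /// is only usable when the VM does not have an in-kernel irqchip, i.e.
    /// `Vm::create_irq_chip` was not called:
    ///
    /// ```ignore
    /// use kvm::*;
    /// use sys_util::{GuestAddress, GuestMemory};
    ///
    /// let kvm = Kvm::new().unwrap();
    /// let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
    /// let vm = Vm::new(&kvm, gm).unwrap();
    /// let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
    /// // Inject interrupt vector 0x20 the next time the guest can take it.
    /// vcpu.interrupt(0x20).unwrap();
    /// ```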
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    pub fn interrupt(&self, irq: u32) -> Result<()> {
        let interrupt = kvm_interrupt { irq };
        // Safe because we allocated the struct and we know the kernel will read
        // exactly the size of the struct.
        let ret = unsafe { ioctl_with_ref(self, KVM_INTERRUPT(), &interrupt) };
        if ret < 0 {
            errno_result()
        } else {
            Ok(())
        }
    }
}

impl AsRawFd for Vcpu {
    fn as_raw_fd(&self) -> RawFd {
        self.vcpu.as_raw_fd()
    }
}

/// Wrapper for `kvm_cpuid2` which has a zero length array at the end.
/// Hides the zero length array behind a bounds check.
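///
/// # Example
///
/// A minimal sketch of building a `CpuId` by hand (x86 only; the field values
/// are placeholders):
///
/// ```ignore
/// let mut cpuid = CpuId::new(1);
/// {
///     let entries = cpuid.mut_entries_slice();
///     entries[0].function = 0;
///     entries[0].index = 0;
/// }
/// // `cpuid` can now be passed to `Vcpu::set_cpuid2`.
/// ```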
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
pub struct CpuId {
    kvm_cpuid: Vec<kvm_cpuid2>,
    allocated_len: usize, // Number of kvm_cpuid_entry2 structs at the end of kvm_cpuid2.
}

#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
impl CpuId {
    pub fn new(array_len: usize) -> CpuId {
        let mut kvm_cpuid = vec_with_array_field::<kvm_cpuid2, kvm_cpuid_entry2>(array_len);
        kvm_cpuid[0].nent = array_len as u32;

        CpuId {
            kvm_cpuid,
            allocated_len: array_len,
        }
    }

    /// Get the entries slice so they can be modified before passing to the VCPU.
    pub fn mut_entries_slice(&mut self) -> &mut [kvm_cpuid_entry2] {
        // Mapping the unsized array to a slice is unsafe because the length isn't known. Using
        // the length we originally allocated with eliminates the possibility of overflow.
        if self.kvm_cpuid[0].nent as usize > self.allocated_len {
            self.kvm_cpuid[0].nent = self.allocated_len as u32;
        }
        let nent = self.kvm_cpuid[0].nent as usize;
        unsafe { self.kvm_cpuid[0].entries.as_mut_slice(nent) }
    }

    /// Get a pointer so it can be passed to the kernel. Using this pointer is unsafe.
    pub fn as_ptr(&self) -> *const kvm_cpuid2 {
        &self.kvm_cpuid[0]
    }

    /// Get a mutable pointer so it can be passed to the kernel. Using this pointer is unsafe.
    pub fn as_mut_ptr(&mut self) -> *mut kvm_cpuid2 {
        &mut self.kvm_cpuid[0]
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn dirty_log_size() {
        let page_size = pagesize();
        assert_eq!(dirty_log_bitmap_size(0), 0);
        assert_eq!(dirty_log_bitmap_size(page_size), 1);
        assert_eq!(dirty_log_bitmap_size(page_size * 8), 1);
        assert_eq!(dirty_log_bitmap_size(page_size * 8 + 1), 2);
        assert_eq!(dirty_log_bitmap_size(page_size * 100), 13);
    }

    #[test]
    fn new() {
        Kvm::new().unwrap();
    }

    #[test]
    fn create_vm() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        Vm::new(&kvm, gm).unwrap();
    }

    #[test]
    fn check_extension() {
        let kvm = Kvm::new().unwrap();
        assert!(kvm.check_extension(Cap::UserMemory));
        // I assume nobody is testing this on s390
        assert!(!kvm.check_extension(Cap::S390UserSigp));
    }

    #[test]
    fn check_vm_extension() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        assert!(vm.check_extension(Cap::UserMemory));
        // I assume nobody is testing this on s390
        assert!(!vm.check_extension(Cap::S390UserSigp));
    }

    #[test]
    fn get_supported_cpuid() {
        let kvm = Kvm::new().unwrap();
        let mut cpuid = kvm.get_supported_cpuid().unwrap();
        let cpuid_entries = cpuid.mut_entries_slice();
        assert!(cpuid_entries.len() > 0);
    }

    #[test]
    fn get_emulated_cpuid() {
        let kvm = Kvm::new().unwrap();
        kvm.get_emulated_cpuid().unwrap();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn get_msr_index_list() {
        let kvm = Kvm::new().unwrap();
        let msr_list = kvm.get_msr_index_list().unwrap();
        assert!(msr_list.len() >= 2);
    }

    #[test]
    fn add_memory() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();
        let mem_size = 0x1000;
        let mem = MemoryMapping::new(mem_size).unwrap();
        vm.add_device_memory(GuestAddress(0x1000), mem, false, false)
            .unwrap();
    }

    #[test]
    fn add_memory_ro() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();
        let mem_size = 0x1000;
        let mem = MemoryMapping::new(mem_size).unwrap();
        vm.add_device_memory(GuestAddress(0x1000), mem, true, false)
            .unwrap();
    }

    #[test]
    fn remove_memory() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();
        let mem_size = 0x1000;
        let mem = MemoryMapping::new(mem_size).unwrap();
        let mem_ptr = mem.as_ptr();
        let slot = vm
            .add_device_memory(GuestAddress(0x1000), mem, false, false)
            .unwrap();
        let mem = vm.remove_device_memory(slot).unwrap();
        assert_eq!(mem.size(), mem_size);
        assert_eq!(mem.as_ptr(), mem_ptr);
    }

    #[test]
    fn remove_invalid_memory() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();
        assert!(vm.remove_device_memory(0).is_err());
    }

    #[test]
    fn overlap_memory() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let mut vm = Vm::new(&kvm, gm).unwrap();
        let mem_size = 0x2000;
        let mem = MemoryMapping::new(mem_size).unwrap();
        assert!(vm
            .add_device_memory(GuestAddress(0x2000), mem, false, false)
            .is_err());
    }

    #[test]
    fn get_memory() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x1000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let obj_addr = GuestAddress(0xf0);
        vm.get_memory().write_obj_at_addr(67u8, obj_addr).unwrap();
        let read_val: u8 = vm.get_memory().read_obj_from_addr(obj_addr).unwrap();
        assert_eq!(read_val, 67u8);
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn clock_handling() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let mut clock_data = vm.get_clock().unwrap();
        clock_data.clock += 1000;
        vm.set_clock(&clock_data).unwrap();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn pic_handling() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        let pic_state = vm.get_pic_state(PicId::Secondary).unwrap();
        vm.set_pic_state(PicId::Secondary, &pic_state).unwrap();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn ioapic_handling() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        let ioapic_state = vm.get_ioapic_state().unwrap();
        vm.set_ioapic_state(&ioapic_state).unwrap();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn pit_handling() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        vm.create_pit().unwrap();
        let pit_state = vm.get_pit_state().unwrap();
        vm.set_pit_state(&pit_state).unwrap();
    }

    #[test]
    fn register_ioevent() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let evtfd = EventFd::new().unwrap();
        vm.register_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(
            &evtfd,
            IoeventAddress::Pio(0xc1),
            Datamatch::U8(Some(0x7fu8)),
        )
        .unwrap();
        vm.register_ioevent(
            &evtfd,
            IoeventAddress::Pio(0xc2),
            Datamatch::U16(Some(0x1337u16)),
        )
        .unwrap();
        vm.register_ioevent(
            &evtfd,
            IoeventAddress::Pio(0xc4),
            Datamatch::U32(Some(0xdeadbeefu32)),
        )
        .unwrap();
        vm.register_ioevent(
            &evtfd,
            IoeventAddress::Pio(0xc8),
            Datamatch::U64(Some(0xdeadbeefdeadbeefu64)),
        )
        .unwrap();
    }

    #[test]
    fn unregister_ioevent() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let evtfd = EventFd::new().unwrap();
        vm.register_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.register_ioevent(
            &evtfd,
            IoeventAddress::Mmio(0x1004),
            Datamatch::U8(Some(0x7fu8)),
        )
        .unwrap();
        vm.unregister_ioevent(&evtfd, IoeventAddress::Pio(0xf4), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(&evtfd, IoeventAddress::Mmio(0x1000), Datamatch::AnyLength)
            .unwrap();
        vm.unregister_ioevent(
            &evtfd,
            IoeventAddress::Mmio(0x1004),
            Datamatch::U8(Some(0x7fu8)),
        )
        .unwrap();
    }

    #[test]
    fn register_irqfd() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let evtfd1 = EventFd::new().unwrap();
        let evtfd2 = EventFd::new().unwrap();
        let evtfd3 = EventFd::new().unwrap();
        vm.register_irqfd(&evtfd1, 4).unwrap();
        vm.register_irqfd(&evtfd2, 8).unwrap();
        vm.register_irqfd(&evtfd3, 4).unwrap();
        vm.register_irqfd(&evtfd3, 4).unwrap_err();
    }

    #[test]
    fn unregister_irqfd() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let evtfd1 = EventFd::new().unwrap();
        let evtfd2 = EventFd::new().unwrap();
        let evtfd3 = EventFd::new().unwrap();
        vm.register_irqfd(&evtfd1, 4).unwrap();
        vm.register_irqfd(&evtfd2, 8).unwrap();
        vm.register_irqfd(&evtfd3, 4).unwrap();
        vm.unregister_irqfd(&evtfd1, 4).unwrap();
        vm.unregister_irqfd(&evtfd2, 8).unwrap();
        vm.unregister_irqfd(&evtfd3, 4).unwrap();
    }

    #[test]
    fn irqfd_resample() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let evtfd1 = EventFd::new().unwrap();
        let evtfd2 = EventFd::new().unwrap();
        vm.register_irqfd_resample(&evtfd1, &evtfd2, 4).unwrap();
        vm.unregister_irqfd(&evtfd1, 4).unwrap();
        // Ensures the ioctl is actually reading the resamplefd.
        vm.register_irqfd_resample(&evtfd1, unsafe { &EventFd::from_raw_fd(-1) }, 4)
            .unwrap_err();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn set_gsi_routing() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        vm.set_gsi_routing(&[]).unwrap();
        vm.set_gsi_routing(&[IrqRoute {
            gsi: 1,
            source: IrqSource::Irqchip {
                chip: KVM_IRQCHIP_IOAPIC,
                pin: 3,
            },
        }])
        .unwrap();
        vm.set_gsi_routing(&[IrqRoute {
            gsi: 1,
            source: IrqSource::Msi {
                address: 0xf000000,
                data: 0xa0,
            },
        }])
        .unwrap();
        vm.set_gsi_routing(&[
            IrqRoute {
                gsi: 1,
                source: IrqSource::Irqchip {
                    chip: KVM_IRQCHIP_IOAPIC,
                    pin: 3,
                },
            },
            IrqRoute {
                gsi: 2,
                source: IrqSource::Msi {
                    address: 0xf000000,
                    data: 0xa0,
                },
            },
        ])
        .unwrap();
    }

    #[test]
    fn create_vcpu() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        Vcpu::new(0, &kvm, &vm).unwrap();
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn debugregs() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
        let mut dregs = vcpu.get_debugregs().unwrap();
        dregs.dr7 = 13;
        vcpu.set_debugregs(&dregs).unwrap();
        let dregs2 = vcpu.get_debugregs().unwrap();
        assert_eq!(dregs.dr7, dregs2.dr7);
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn xcrs() {
        let kvm = Kvm::new().unwrap();
        if !kvm.check_extension(Cap::Xcrs) {
            return;
        }

        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
        let mut xcrs = vcpu.get_xcrs().unwrap();
        xcrs.xcrs[0].value = 1;
        vcpu.set_xcrs(&xcrs).unwrap();
        let xcrs2 = vcpu.get_xcrs().unwrap();
        assert_eq!(xcrs.xcrs[0].value, xcrs2.xcrs[0].value);
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn get_msrs() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
        let mut msrs = vec![
            // This one should succeed
            kvm_msr_entry {
                index: 0x0000011e,
                ..Default::default()
            },
            // This one will fail to fetch
            kvm_msr_entry {
                index: 0x000003f1,
                ..Default::default()
            },
        ];
        vcpu.get_msrs(&mut msrs).unwrap();
        assert_eq!(msrs.len(), 1);
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn mp_state() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.create_irq_chip().unwrap();
        let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
        let state = vcpu.get_mp_state().unwrap();
        vcpu.set_mp_state(&state).unwrap();
    }

    #[test]
    fn set_signal_mask() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        let vcpu = Vcpu::new(0, &kvm, &vm).unwrap();
        vcpu.set_signal_mask(&[sys_util::SIGRTMIN() + 0]).unwrap();
    }

    #[test]
    fn vcpu_mmap_size() {
        let kvm = Kvm::new().unwrap();
        let mmap_size = kvm.get_vcpu_mmap_size().unwrap();
        let page_size = pagesize();
        assert!(mmap_size >= page_size);
        assert!(mmap_size % page_size == 0);
    }

    #[test]
    #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
    fn set_identity_map_addr() {
        let kvm = Kvm::new().unwrap();
        let gm = GuestMemory::new(&vec![(GuestAddress(0), 0x10000)]).unwrap();
        let vm = Vm::new(&kvm, gm).unwrap();
        vm.set_identity_map_addr(GuestAddress(0x20000)).unwrap();
    }
}