// Copyright 2020 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use core::ffi::c_void;
use std::arch::x86_64::CpuidResult;
use std::cmp::min;
use std::collections::BTreeMap;
use std::intrinsics::copy_nonoverlapping;
use std::mem::size_of;

use base::errno_result;
use base::ioctl;
use base::ioctl_with_mut_ref;
use base::ioctl_with_ptr_sized;
use base::ioctl_with_ref;
use base::warn;
use base::AsRawDescriptor;
use base::Error;
use base::RawDescriptor;
use base::Result;
use base::SafeDescriptor;
use data_model::vec_with_array_field;
use libc::EINVAL;
use libc::ENOENT;
use libc::ENXIO;
use libc::EOPNOTSUPP;
use vm_memory::GuestAddress;

use super::*;
use crate::CpuId;
use crate::CpuIdEntry;
use crate::DebugRegs;
use crate::DescriptorTable;
use crate::Fpu;
use crate::HypervHypercall;
use crate::IoOperation;
use crate::IoParams;
use crate::Regs;
use crate::Segment;
use crate::Sregs;
use crate::Vcpu;
use crate::VcpuExit;
use crate::VcpuX86_64;
use crate::Xsave;

// HAXM exit reasons
// IO port request
const HAX_EXIT_IO: u32 = 1;
// MMIO instruction emulation; should not happen anymore, replaced with
// HAX_EXIT_FAST_MMIO
#[allow(dead_code)]
const HAX_EXIT_MMIO: u32 = 2;
// Real mode emulation when unrestricted guest is disabled
#[allow(dead_code)]
const HAX_EXIT_REALMODE: u32 = 3;
// Interrupt window open, crosvm can inject an interrupt now.
// Also used when the vcpu thread receives a signal.
const HAX_EXIT_INTERRUPT: u32 = 4;
// Unknown vmexit, mostly triggers a reboot
const HAX_EXIT_UNKNOWN: u32 = 5;
// HALT from guest
const HAX_EXIT_HLT: u32 = 6;
// Reboot request, e.g. due to a triple fault in the guest
const HAX_EXIT_STATECHANGE: u32 = 7;
// Paused by crosvm setting _exit_reason to HAX_EXIT_PAUSED before entry
pub(crate) const HAX_EXIT_PAUSED: u32 = 8;
// MMIO instruction emulation through io_buffer
const HAX_EXIT_FAST_MMIO: u32 = 9;
// Page fault that was not able to be handled by HAXM
const HAX_EXIT_PAGEFAULT: u32 = 10;
// A debug exception caused a vmexit
const HAX_EXIT_DEBUG: u32 = 11;

// HAXM exit directions
const HAX_EXIT_DIRECTION_PIO_IN: u32 = 1;
const HAX_EXIT_DIRECTION_PIO_OUT: u32 = 0;
const HAX_EXIT_DIRECTION_MMIO_READ: u8 = 0;
const HAX_EXIT_DIRECTION_MMIO_WRITE: u8 = 1;

pub struct HaxmVcpu {
    pub(super) descriptor: SafeDescriptor,
    pub(super) id: usize,
    pub(super) tunnel: *mut hax_tunnel,
    pub(super) io_buffer: *mut c_void,
}

// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Send for HaxmVcpu {}
// TODO(b/315998194): Add safety comment
#[allow(clippy::undocumented_unsafe_blocks)]
unsafe impl Sync for HaxmVcpu {}

impl AsRawDescriptor for HaxmVcpu {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.descriptor.as_raw_descriptor()
    }
}

impl HaxmVcpu {
    fn get_vcpu_state(&self) -> Result<VcpuState> {
        let mut state = vcpu_state_t::default();

        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_GET_REGS(), &mut state) };
        if ret != 0 {
            return errno_result();
        }

        // Also read efer MSR
        state._efer = self.get_msr(IA32_EFER)? as u32;

        Ok(VcpuState { state })
    }

    fn set_vcpu_state(&self, state: &mut VcpuState) -> Result<()> {
        // SAFETY: trivially safe with return value checked.
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_SET_REGS(), &mut state.state) };
        if ret != 0 {
            return errno_result();
        }

        // Also set efer MSR
        self.set_msr(IA32_EFER, state.state._efer as u64)
    }
}

impl Vcpu for HaxmVcpu {
    /// Makes a shallow clone of this `Vcpu`.
    fn try_clone(&self) -> Result<Self> {
        Ok(HaxmVcpu {
            descriptor: self.descriptor.try_clone()?,
            id: self.id,
            tunnel: self.tunnel,
            io_buffer: self.io_buffer,
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    /// Returns the vcpu id.
    fn id(&self) -> usize {
        self.id
    }

    /// Sets the bit that requests an immediate exit.
    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know the tunnel is a pointer to a hax_tunnel and we know its size.
        // Crosvm's HAXM implementation does not use the _exit_reason, so it's fine if we
        // overwrite it.
        unsafe {
            (*self.tunnel)._exit_reason = if exit { HAX_EXIT_PAUSED } else { 0 };
        }
    }

    /// Signals to the hypervisor that this guest is being paused by userspace.
    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    /// Enables a hypervisor-specific extension on this Vcpu.  `cap` is a constant defined by the
    /// hypervisor API.  `args` are the arguments for enabling the feature, if any.
    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        // Haxm does not support enable_capability
        Err(Error::new(libc::ENXIO))
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Mmio`.
    ///
    /// Once called, it will determine whether an mmio read or mmio write was the reason for the
    /// mmio exit, call `handle_fn` with the respective IoOperation to perform the mmio read or
    /// write, and set the return data in the vcpu so that the vcpu can resume running.
    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for mmio context only.
        unsafe {
            assert!((*self.tunnel)._exit_status == HAX_EXIT_FAST_MMIO);
        }
        let mmio = self.io_buffer as *mut hax_fastmmio;
        let (address, size, direction) =
            // SAFETY:
            // Safe because the exit_reason (which comes from the kernel) told us which
            // union field to use.
            unsafe { ((*mmio).gpa, (*mmio).size as usize, (*mmio).direction) };

        match direction {
            HAX_EXIT_DIRECTION_MMIO_READ => {
                if let Some(data) = handle_fn(IoParams {
                    address,
                    size,
                    operation: IoOperation::Read,
                }) {
                    let data = u64::from_ne_bytes(data);
                    // SAFETY:
                    // Safe because we know this is an mmio read, so we need to put data into the
                    // "value" field of the hax_fastmmio.
                    unsafe {
                        (*mmio).__bindgen_anon_1.value = data;
                    }
                }
                Ok(())
            }
            HAX_EXIT_DIRECTION_MMIO_WRITE => {
                // SAFETY:
                // Safe because we trust haxm to fill in the union properly.
                let data = unsafe { (*mmio).__bindgen_anon_1.value };
                handle_fn(IoParams {
                    address,
                    size,
                    operation: IoOperation::Write {
                        data: data.to_ne_bytes(),
                    },
                });
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }
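
    // Illustrative usage sketch (not part of crosvm): after `run()` returns
    // `VcpuExit::Mmio`, the caller services the access with a closure over its own bus
    // model, e.g. with hypothetical `bus_read`/`bus_write` helpers:
    //
    //     vcpu.handle_mmio(&mut |IoParams { address, size, operation }| match operation {
    //         IoOperation::Read => Some(bus_read(address, size)),
    //         IoOperation::Write { data } => {
    //             bus_write(address, &data[..size]);
    //             None
    //         }
    //     })?;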

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Io`.
    ///
    /// Once called, it will determine whether an io in or io out was the reason for the io exit,
    /// call `handle_fn` with the respective IoOperation to perform the io in or io out,
    /// and set the return data in the vcpu so that the vcpu can resume running.
    #[allow(clippy::cast_ptr_alignment)]
    fn handle_io(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        // Verify that the handler is called for io context only.
        unsafe {
            assert!((*self.tunnel)._exit_status == HAX_EXIT_IO);
        }
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let io = unsafe { (*self.tunnel).__bindgen_anon_1.io };
        let address = io._port.into();
        let size = (io._count as usize) * (io._size as usize);
        match io._direction as u32 {
            HAX_EXIT_DIRECTION_PIO_IN => {
                if let Some(data) = handle_fn(IoParams {
                    address,
                    size,
                    operation: IoOperation::Read,
                }) {
                    // SAFETY:
                    // Safe because the exit_reason (which comes from the kernel) told us that
                    // this is port io, where the iobuf can be treated as a *u8
                    unsafe {
                        copy_nonoverlapping(data.as_ptr(), self.io_buffer as *mut u8, size);
                    }
                }
                Ok(())
            }
            HAX_EXIT_DIRECTION_PIO_OUT => {
                let mut data = [0; 8];
                // SAFETY:
                // Safe because we copy at most `data.len()` bytes, bounded by the size the
                // kernel reported.
                unsafe {
                    copy_nonoverlapping(
                        self.io_buffer as *const u8,
                        data.as_mut_ptr(),
                        min(size, data.len()),
                    );
                }
                handle_fn(IoParams {
                    address,
                    size,
                    operation: IoOperation::Write { data },
                });
                Ok(())
            }
            _ => Err(Error::new(EINVAL)),
        }
    }
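
    // Port I/O exits are serviced the same way as the MMIO sketch above: the closure
    // receives the port number in `address` and either returns data to be read by the
    // guest or consumes the bytes the guest wrote.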

    /// haxm does not handle hypercalls.
    fn handle_hyperv_hypercall(&self, _func: &mut dyn FnMut(HypervHypercall) -> u64) -> Result<()> {
        Err(Error::new(libc::ENXIO))
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::RdMsr`,
    /// and in the same thread as run.
    ///
    /// It will put `data` into the user buffer and return.
    fn handle_rdmsr(&self, _data: u64) -> Result<()> {
        // TODO(b/233766326): Implement.
        Err(Error::new(libc::ENXIO))
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::WrMsr`,
    /// and in the same thread as run.
    fn handle_wrmsr(&self) {
        // TODO(b/233766326): Implement.
    }

    #[allow(clippy::cast_ptr_alignment)]
    // The pointer is page aligned so casting to a different type is well defined, hence the clippy
    // allow attribute.
    fn run(&mut self) -> Result<VcpuExit> {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl(self, HAX_VCPU_IOCTL_RUN()) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        let exit_status = unsafe { (*self.tunnel)._exit_status };

        match exit_status {
            HAX_EXIT_IO => Ok(VcpuExit::Io),
            HAX_EXIT_INTERRUPT => Ok(VcpuExit::Intr),
            HAX_EXIT_UNKNOWN => Ok(VcpuExit::Unknown),
            HAX_EXIT_HLT => Ok(VcpuExit::Hlt),
            HAX_EXIT_STATECHANGE => Ok(VcpuExit::Shutdown),
            HAX_EXIT_FAST_MMIO => Ok(VcpuExit::Mmio),
            HAX_EXIT_PAGEFAULT => Ok(VcpuExit::Exception),
            HAX_EXIT_DEBUG => Ok(VcpuExit::Debug),
            HAX_EXIT_PAUSED => Ok(VcpuExit::Exception),
            r => panic!("unknown exit reason: {}", r),
        }
    }
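
    // Illustrative caller loop (a sketch only, not crosvm's actual run loop; `io_handler`
    // and `mmio_handler` are hypothetical closures):
    //
    //     loop {
    //         match vcpu.run()? {
    //             VcpuExit::Io => vcpu.handle_io(&mut io_handler)?,
    //             VcpuExit::Mmio => vcpu.handle_mmio(&mut mmio_handler)?,
    //             VcpuExit::Hlt | VcpuExit::Shutdown => break,
    //             _ => {}
    //         }
    //     }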
}

impl VcpuX86_64 for HaxmVcpu {
    /// Sets or clears the flag that requests the VCPU to exit when it becomes possible to inject
    /// interrupts into the guest.
    fn set_interrupt_window_requested(&self, requested: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe {
            (*self.tunnel).request_interrupt_window = i32::from(requested);
        }
    }

    /// Checks if we can inject an interrupt into the VCPU.
    fn ready_for_interrupt(&self) -> bool {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the hax_tunnel struct because the
        // kernel told us how large it was.
        unsafe { (*self.tunnel).ready_for_interrupt_injection != 0 }
    }

    /// Injects interrupt vector `irq` into the VCPU.
    fn interrupt(&self, irq: u32) -> Result<()> {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_INTERRUPT(), &irq) };
        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }
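
    // Typical injection sequence (a sketch, not a prescribed crosvm flow): when an
    // interrupt is pending, call `set_interrupt_window_requested(true)`; once
    // `ready_for_interrupt()` reports true (for example after a `VcpuExit::Intr`),
    // deliver the vector with `interrupt(vector)` and clear the window request.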

    /// Injects a non-maskable interrupt into the VCPU.
    fn inject_nmi(&self) -> Result<()> {
        warn!("HAXM does not support injecting NMIs");
        Ok(())
    }

    /// Gets the VCPU general purpose registers.
    fn get_regs(&self) -> Result<Regs> {
        Ok(self.get_vcpu_state()?.get_regs())
    }

    /// Sets the VCPU general purpose registers.
    fn set_regs(&self, regs: &Regs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_regs(regs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU special registers.
    fn get_sregs(&self) -> Result<Sregs> {
        Ok(self.get_vcpu_state()?.get_sregs())
    }

    /// Sets the VCPU special registers.
    fn set_sregs(&self, sregs: &Sregs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_sregs(sregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU FPU registers.
    fn get_fpu(&self) -> Result<Fpu> {
        let mut fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU(), &mut fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(Fpu::from(&fpu))
    }

    /// Sets the VCPU FPU registers.
    fn set_fpu(&self, fpu: &Fpu) -> Result<()> {
        let mut current_fpu = fx_layout::default();
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_FPU(), &mut current_fpu) };

        if ret != 0 {
            return errno_result();
        }

        let mut new_fpu = fx_layout::from(fpu);

        // The mxcsr mask isn't part of the Fpu state, so make the new fpu state's
        // mxcsr_mask match its current value.
        new_fpu.mxcsr_mask = current_fpu.mxcsr_mask;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_ref(self, HAX_VCPU_IOCTL_SET_FPU(), &new_fpu) };

        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    fn get_xsave(&self) -> Result<Xsave> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_xsave(&self, _xsave: &Xsave) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn get_interrupt_state(&self) -> Result<serde_json::Value> {
        Err(Error::new(EOPNOTSUPP))
    }

    fn set_interrupt_state(&self, _data: serde_json::Value) -> Result<()> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Gets the VCPU debug registers.
    fn get_debugregs(&self) -> Result<DebugRegs> {
        Ok(self.get_vcpu_state()?.get_debugregs())
    }

    /// Sets the VCPU debug registers.
    fn set_debugregs(&self, debugregs: &DebugRegs) -> Result<()> {
        let mut state = self.get_vcpu_state()?;
        state.set_debugregs(debugregs);
        self.set_vcpu_state(&mut state)?;
        Ok(())
    }

    /// Gets the VCPU extended control registers.
    fn get_xcrs(&self) -> Result<BTreeMap<u32, u64>> {
        // Haxm does not support getting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Sets a VCPU extended control register.
    fn set_xcr(&self, _xcr_index: u32, _value: u64) -> Result<()> {
        // Haxm does not support setting XCRs
        Err(Error::new(libc::ENXIO))
    }

    /// Gets the value of one model-specific register.
    fn get_msr(&self, msr_index: u32) -> Result<u64> {
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_GET_MSRS(), &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(msr_data.entries[0].value)
    }

    fn get_all_msrs(&self) -> Result<BTreeMap<u32, u64>> {
        Err(Error::new(EOPNOTSUPP))
    }

    /// Sets the value of one model-specific register.
    fn set_msr(&self, msr_index: u32, value: u64) -> Result<()> {
        let mut msr_data = hax_msr_data {
            nr_msr: 1,
            ..Default::default()
        };
        msr_data.entries[0].entry = u64::from(msr_index);
        msr_data.entries[0].value = value;

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe { ioctl_with_mut_ref(self, HAX_VCPU_IOCTL_SET_MSRS(), &mut msr_data) };
        if ret != 0 {
            return errno_result();
        }

        Ok(())
    }

    /// Sets up the data returned by the CPUID instruction.
    fn set_cpuid(&self, cpuid: &CpuId) -> Result<()> {
        let total = cpuid.cpu_id_entries.len();
        let mut hax = vec_with_array_field::<hax_cpuid, hax_cpuid_entry>(total);
        hax[0].total = total as u32;
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let entries = unsafe { hax[0].entries.as_mut_slice(total) };
        for (i, e) in cpuid.cpu_id_entries.iter().enumerate() {
            entries[i] = hax_cpuid_entry::from(e);
        }

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        let ret = unsafe {
            ioctl_with_ptr_sized(
                self,
                HAX_VCPU_IOCTL_SET_CPUID(),
                hax.as_ptr(),
                size_of::<hax_cpuid>() + total * size_of::<hax_cpuid_entry>(),
            )
        };

        if ret != 0 {
            return errno_result();
        }
        Ok(())
    }

    /// This function should be called after `Vcpu::run` returns `VcpuExit::Cpuid`, and `entry`
    /// should represent the result of emulating the CPUID instruction. The `handle_cpuid` function
    /// will then set the appropriate registers on the vcpu.
    /// HAXM does not support the VcpuExit::Cpuid exit type.
    fn handle_cpuid(&mut self, _entry: &CpuIdEntry) -> Result<()> {
        Err(Error::new(ENXIO))
    }

    /// Gets the system emulated hyper-v CPUID values.
    fn get_hyperv_cpuid(&self) -> Result<CpuId> {
        // HaxmVcpu does not support hyperv_cpuid
        Err(Error::new(libc::ENXIO))
    }

    fn set_guest_debug(&self, _addrs: &[GuestAddress], _enable_singlestep: bool) -> Result<()> {
        // TODO(b/173807302): Implement this
        Err(Error::new(ENOENT))
    }

    fn restore_timekeeping(&self, _host_tsc_reference_moment: u64, tsc_offset: u64) -> Result<()> {
        // HAXM sets TSC_OFFSET based on what we set TSC to; however, it does
        // not yet handle syncing. This means it computes
        // TSC_OFFSET = new_tsc - rdtsc(), so if we want to target the same
        // offset value, we need new_tsc = rdtsc() + target_offset. This is what
        // Self::set_tsc_offset does.
        //
        // TODO(b/311793539): haxm doesn't yet support syncing TSCs across VCPUs
        // if the TSC value is non-zero. Once we have that support, we can
        // switch to calling Self::set_tsc_value here with the common host
        // reference moment. (Alternatively, we may just expose a way to set the
        // offset directly.)
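        //
        // Illustrative arithmetic (made-up numbers): if `tsc_offset` is 1_000 and the
        // host rdtsc() reads roughly 5_000_000 when set_tsc_offset runs, the new guest
        // TSC is written as 5_001_000 and HAXM derives
        // TSC_OFFSET = 5_001_000 - rdtsc() ≈ 1_000, modulo the cycles that elapse
        // between the two rdtsc() reads.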
        self.set_tsc_offset(tsc_offset)
    }
}

struct VcpuState {
    state: vcpu_state_t,
}

impl VcpuState {
    fn get_regs(&self) -> Regs {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Regs {
                rax: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    ._rax,
                rbx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_4
                    ._rbx,
                rcx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_2
                    ._rcx,
                rdx: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_3
                    ._rdx,
                rsi: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_7
                    ._rsi,
                rdi: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_8
                    ._rdi,
                rsp: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_5
                    ._rsp,
                rbp: self
                    .state
                    .__bindgen_anon_1
                    .__bindgen_anon_1
                    .__bindgen_anon_6
                    ._rbp,
                r8: self.state.__bindgen_anon_1.__bindgen_anon_1._r8,
                r9: self.state.__bindgen_anon_1.__bindgen_anon_1._r9,
                r10: self.state.__bindgen_anon_1.__bindgen_anon_1._r10,
                r11: self.state.__bindgen_anon_1.__bindgen_anon_1._r11,
                r12: self.state.__bindgen_anon_1.__bindgen_anon_1._r12,
                r13: self.state.__bindgen_anon_1.__bindgen_anon_1._r13,
                r14: self.state.__bindgen_anon_1.__bindgen_anon_1._r14,
                r15: self.state.__bindgen_anon_1.__bindgen_anon_1._r15,
                rip: self.state.__bindgen_anon_2._rip,
                rflags: self.state.__bindgen_anon_3._rflags,
            }
        }
    }

    fn set_regs(&mut self, regs: &Regs) {
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_1
            ._rax = regs.rax;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_4
            ._rbx = regs.rbx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_2
            ._rcx = regs.rcx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_3
            ._rdx = regs.rdx;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_7
            ._rsi = regs.rsi;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_8
            ._rdi = regs.rdi;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_5
            ._rsp = regs.rsp;
        self.state
            .__bindgen_anon_1
            .__bindgen_anon_1
            .__bindgen_anon_6
            ._rbp = regs.rbp;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r8 = regs.r8;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r9 = regs.r9;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r10 = regs.r10;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r11 = regs.r11;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r12 = regs.r12;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r13 = regs.r13;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r14 = regs.r14;
        self.state.__bindgen_anon_1.__bindgen_anon_1._r15 = regs.r15;
        self.state.__bindgen_anon_2._rip = regs.rip;
        self.state.__bindgen_anon_3._rflags = regs.rflags;
    }

    fn get_sregs(&self) -> Sregs {
        Sregs {
            cs: Segment::from(&self.state._cs),
            ds: Segment::from(&self.state._ds),
            es: Segment::from(&self.state._es),
            fs: Segment::from(&self.state._fs),
            gs: Segment::from(&self.state._gs),
            ss: Segment::from(&self.state._ss),
            tr: Segment::from(&self.state._tr),
            ldt: Segment::from(&self.state._ldt),
            gdt: DescriptorTable::from(&self.state._gdt),
            idt: DescriptorTable::from(&self.state._idt),
            cr0: self.state._cr0,
            cr2: self.state._cr2,
            cr3: self.state._cr3,
            cr4: self.state._cr4,
            // HAXM does not support setting cr8
            cr8: 0,
            efer: self.state._efer as u64,
        }
    }

    fn set_sregs(&mut self, sregs: &Sregs) {
        self.state._cs = segment_desc_t::from(&sregs.cs);
        self.state._ds = segment_desc_t::from(&sregs.ds);
        self.state._es = segment_desc_t::from(&sregs.es);
        self.state._fs = segment_desc_t::from(&sregs.fs);
        self.state._gs = segment_desc_t::from(&sregs.gs);
        self.state._ss = segment_desc_t::from(&sregs.ss);
        self.state._tr = segment_desc_t::from(&sregs.tr);
        self.state._ldt = segment_desc_t::from(&sregs.ldt);
        self.state._gdt = segment_desc_t::from(&sregs.gdt);
        self.state._idt = segment_desc_t::from(&sregs.idt);
        self.state._cr0 = sregs.cr0;
        self.state._cr2 = sregs.cr2;
        self.state._cr3 = sregs.cr3;
        self.state._cr4 = sregs.cr4;
        self.state._efer = sregs.efer as u32;
    }

    fn get_debugregs(&self) -> DebugRegs {
        DebugRegs {
            db: [
                self.state._dr0,
                self.state._dr1,
                self.state._dr2,
                self.state._dr3,
            ],
            dr6: self.state._dr6,
            dr7: self.state._dr7,
        }
    }

    fn set_debugregs(&mut self, debugregs: &DebugRegs) {
        self.state._dr0 = debugregs.db[0];
        self.state._dr1 = debugregs.db[1];
        self.state._dr2 = debugregs.db[2];
        self.state._dr3 = debugregs.db[3];
        self.state._dr6 = debugregs.dr6;
        self.state._dr7 = debugregs.dr7;
    }
}

// HAXM's segment descriptor format matches exactly with the VMCS structure. The format
// of the AR bits is described in the Intel System Programming Guide Part 3, chapter 24.4.1,
// table 24-2. The main confusing thing is that the type_ field in haxm is 4 bits, meaning
// the 3 least significant bits represent the normal type field, and the most significant
// bit represents the "descriptor type" field.
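//
// As a concrete (illustrative) example, a flat 64-bit code segment as typically set up
// for long mode would carry roughly: type_ = 0xb (execute/read, accessed), s = 1
// (code/data descriptor), present = 1, dpl = 0, db = 0, l = 1, g = 1; the exact values
// depend on how the guest or firmware programs its descriptors.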

impl From<&segment_desc_t> for Segment {
    fn from(item: &segment_desc_t) -> Self {
        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            Segment {
                base: item.base,
                limit: item.limit,
                selector: item.selector,
                type_: item.__bindgen_anon_1.__bindgen_anon_1.type_() as u8,
                present: item.__bindgen_anon_1.__bindgen_anon_1.present() as u8,
                dpl: item.__bindgen_anon_1.__bindgen_anon_1.dpl() as u8,
                db: item.__bindgen_anon_1.__bindgen_anon_1.operand_size() as u8,
                s: item.__bindgen_anon_1.__bindgen_anon_1.desc() as u8,
                l: item.__bindgen_anon_1.__bindgen_anon_1.long_mode() as u8,
                g: item.__bindgen_anon_1.__bindgen_anon_1.granularity() as u8,
                avl: item.__bindgen_anon_1.__bindgen_anon_1.available() as u8,
            }
        }
    }
}

impl From<&Segment> for segment_desc_t {
    fn from(item: &Segment) -> Self {
        let mut segment = segment_desc_t {
            base: item.base,
            limit: item.limit,
            selector: item.selector,
            ..Default::default()
        };

        // TODO(b/315998194): Add safety comment
        #[allow(clippy::undocumented_unsafe_blocks)]
        unsafe {
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_type(item.type_ as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_desc(item.s as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_present(item.present as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_dpl(item.dpl as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_operand_size(item.db as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_long_mode(item.l as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_granularity(item.g as u32);
            segment
                .__bindgen_anon_1
                .__bindgen_anon_1
                .set_available(item.avl as u32);
        }

        segment
    }
}

impl From<&segment_desc_t> for DescriptorTable {
    fn from(item: &segment_desc_t) -> Self {
        DescriptorTable {
            base: item.base,
            limit: item.limit as u16,
        }
    }
}

impl From<&DescriptorTable> for segment_desc_t {
    fn from(item: &DescriptorTable) -> Self {
        segment_desc_t {
            base: item.base,
            limit: item.limit as u32,
            ..Default::default()
        }
    }
}

impl From<&fx_layout> for Fpu {
    fn from(item: &fx_layout) -> Self {
        let mut fpu = Fpu {
            fpr: item.st_mm,
            fcw: item.fcw,
            fsw: item.fsw,
            ftwx: item.ftw,
            last_opcode: item.fop,
            // SAFETY: trivially safe
            last_ip: unsafe { item.__bindgen_anon_1.fpu_ip },
            // SAFETY: trivially safe
            last_dp: unsafe { item.__bindgen_anon_2.fpu_dp },
            xmm: [[0; 16]; 16],
            mxcsr: item.mxcsr,
        };

        fpu.xmm[..8].copy_from_slice(&item.mmx_1[..]);
        fpu.xmm[8..].copy_from_slice(&item.mmx_2[..]);

        fpu
    }
}

impl From<&Fpu> for fx_layout {
    fn from(item: &Fpu) -> Self {
        let mut fpu = fx_layout {
            fcw: item.fcw,
            fsw: item.fsw,
            ftw: item.ftwx,
            res1: 0,
            fop: item.last_opcode,
            __bindgen_anon_1: fx_layout__bindgen_ty_1 {
                fpu_ip: item.last_ip,
            },
            __bindgen_anon_2: fx_layout__bindgen_ty_2 {
                fpu_dp: item.last_dp,
            },
            mxcsr: item.mxcsr,
            mxcsr_mask: 0,
            st_mm: item.fpr,
            mmx_1: [[0; 16]; 8],
            mmx_2: [[0; 16]; 8],
            pad: [0; 96],
        };

        fpu.mmx_1.copy_from_slice(&item.xmm[..8]);
        fpu.mmx_2.copy_from_slice(&item.xmm[8..]);

        fpu
    }
}

impl From<&hax_cpuid_entry> for CpuIdEntry {
    fn from(item: &hax_cpuid_entry) -> Self {
        CpuIdEntry {
            function: item.function,
            index: item.index,
            flags: item.flags,
            cpuid: CpuidResult {
                eax: item.eax,
                ebx: item.ebx,
                ecx: item.ecx,
                edx: item.edx,
            },
        }
    }
}

impl From<&CpuIdEntry> for hax_cpuid_entry {
    fn from(item: &CpuIdEntry) -> Self {
        hax_cpuid_entry {
            function: item.function,
            index: item.index,
            flags: item.flags,
            eax: item.cpuid.eax,
            ebx: item.cpuid.ebx,
            ecx: item.cpuid.ecx,
            edx: item.cpuid.edx,
            pad: Default::default(),
        }
    }
}

// TODO(b:241252288): Enable tests disabled with dummy feature flag - enable_haxm_tests.
#[cfg(test)]
#[cfg(feature = "enable_haxm_tests")]
mod tests {
    use vm_memory::GuestAddress;
    use vm_memory::GuestMemory;

    use super::*;
    use crate::VmX86_64;

    // EFER Bits
    const EFER_SCE: u64 = 0x00000001;
    const EFER_LME: u64 = 0x00000100;
    const EFER_LMA: u64 = 0x00000400;

    // CR0 bits
    const CR0_PG: u64 = 1 << 31;

    #[test]
    fn get_regs() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_regs().expect("failed to get regs");
    }

    #[test]
    fn get_fpu() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.get_fpu().expect("failed to get fpu");
    }

    #[test]
    fn set_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        vcpu.set_msr(38, 0x300).expect("failed to set MSR");
    }

    #[test]
    fn get_msr() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let _value = vcpu.get_msr(38).expect("failed to get MSR");
    }

    #[test]
    fn set_cpuid() {
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut cpuid = haxm
            .get_supported_cpuid()
            .expect("failed to get supported cpuids");
        for entry in &mut cpuid.cpu_id_entries {
            if entry.function == 1 {
                // Disable XSAVE and OSXSAVE
                entry.cpuid.ecx &= !(1 << 26);
                entry.cpuid.ecx &= !(1 << 27);
            }
        }

        vcpu.set_cpuid(&cpuid).expect("failed to set cpuid");
    }

    #[test]
    fn set_efer() {
        // HAXM efer setting requires some extra code, so we have this test specifically
        // checking that it's working.
        let haxm = Haxm::new().expect("failed to instantiate HAXM");
        let mem =
            GuestMemory::new(&[(GuestAddress(0), 0x1000)]).expect("failed to create guest memory");
        let vm = HaxmVm::new(&haxm, mem).expect("failed to create vm");
        let vcpu = vm.create_vcpu(0).expect("failed to create vcpu");

        let mut sregs = vcpu.get_sregs().expect("failed to get sregs");
        // Initial value should be 0
        assert_eq!(sregs.efer, 0);

        // Enable and activate long mode
        sregs.efer = EFER_LMA | EFER_LME;
        // Need to enable paging or LMA will be turned off
        sregs.cr0 |= CR0_PG;
        vcpu.set_sregs(&sregs).expect("failed to set sregs");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer, EFER_LMA | EFER_LME);

        // IA32_EFER register value should match
        let efer = vcpu.get_msr(IA32_EFER).expect("failed to get msr");
        assert_eq!(efer, EFER_LMA | EFER_LME);

        // Enable SCE via set_msrs
        vcpu.set_msr(IA32_EFER, efer | EFER_SCE)
            .expect("failed to set msr");

        // Verify that setting stuck
        let sregs = vcpu.get_sregs().expect("failed to get sregs");
        assert_eq!(sregs.efer, EFER_SCE | EFER_LME | EFER_LMA);
        let new_efer = vcpu.get_msr(IA32_EFER).expect("failed to get msrs");
        assert_eq!(new_efer, EFER_SCE | EFER_LME | EFER_LMA);
    }
}