// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
mod aarch64;

mod gunyah_sys;
use std::cmp::min;
use std::cmp::Reverse;
use std::collections::BTreeMap;
use std::collections::BinaryHeap;
use std::collections::HashSet;
use std::ffi::CString;
use std::mem::size_of;
use std::os::raw::c_ulong;
use std::os::unix::prelude::OsStrExt;
use std::path::Path;
use std::path::PathBuf;
use std::sync::Arc;

use base::errno_result;
use base::info;
use base::ioctl;
use base::ioctl_with_ref;
use base::ioctl_with_val;
use base::linux::MemoryMappingBuilderUnix;
use base::pagesize;
use base::warn;
use base::Error;
use base::FromRawDescriptor;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::MmapError;
use base::RawDescriptor;
use gunyah_sys::*;
use libc::open;
use libc::EFAULT;
use libc::EINVAL;
use libc::EIO;
use libc::ENOENT;
use libc::ENOSPC;
use libc::EOVERFLOW;
use libc::O_CLOEXEC;
use libc::O_RDWR;
use sync::Mutex;
use vm_memory::MemoryRegionPurpose;

use crate::*;

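/// Wrapper around the Gunyah hypervisor device node (`/dev/gunyah` by default).
///
/// A minimal usage sketch (illustrative only, assuming a `GuestMemory` and `Config`
/// constructed elsewhere):
///
/// ```ignore
/// let gunyah = Gunyah::new()?;
/// let vm = GunyahVm::new(&gunyah, guest_mem, cfg)?;
/// ```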
pub struct Gunyah {
    gunyah: SafeDescriptor,
}

impl AsRawDescriptor for Gunyah {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.gunyah.as_raw_descriptor()
    }
}

impl Gunyah {
    pub fn new_with_path(device_path: &Path) -> Result<Gunyah> {
        let c_path = CString::new(device_path.as_os_str().as_bytes()).unwrap();
        // SAFETY:
        // Open calls are safe because we give a nul-terminated string and verify the result.
        let ret = unsafe { open(c_path.as_ptr(), O_RDWR | O_CLOEXEC) };
        if ret < 0 {
            return errno_result();
        }
        Ok(Gunyah {
            // SAFETY:
            // Safe because we verify that ret is valid and we own the fd.
            gunyah: unsafe { SafeDescriptor::from_raw_descriptor(ret) },
        })
    }

    pub fn new() -> Result<Gunyah> {
        Gunyah::new_with_path(&PathBuf::from("/dev/gunyah"))
    }
}

impl Hypervisor for Gunyah {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(Gunyah {
            gunyah: self.gunyah.try_clone()?,
        })
    }

    fn check_capability(&self, cap: HypervisorCap) -> bool {
        match cap {
            HypervisorCap::UserMemory => true,
            HypervisorCap::ArmPmuV3 => false,
            HypervisorCap::ImmediateExit => true,
            HypervisorCap::StaticSwiotlbAllocationRequired => true,
            HypervisorCap::HypervisorInitializedBootContext => true,
            HypervisorCap::S390UserSigp | HypervisorCap::TscDeadlineTimer => false,
            #[cfg(target_arch = "x86_64")]
            HypervisorCap::Xcrs | HypervisorCap::CalibratedTscLeafRequired => false,
        }
    }
}

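// Wrapper around the GH_VM_ANDROID_LEND_USER_MEM ioctl. Unlike a shared mapping, lending
// donates the pages to the guest, so the host loses access to them while the VM runs; this
// is what protected-VM memory isolation relies on. (Summary added for context; the Gunyah
// UAPI documentation is authoritative on the exact semantics.)
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.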
unsafe fn android_lend_user_memory_region(
    vm: &SafeDescriptor,
    slot: MemSlot,
    read_only: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = 0;

    flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
    if !read_only {
        flags |= GH_MEM_ALLOW_WRITE;
    }

    let region = gh_userspace_memory_region {
        label: slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(vm, GH_VM_ANDROID_LEND_USER_MEM(), &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

// Wrapper around GH_SET_USER_MEMORY_REGION ioctl, which creates, modifies, or deletes a mapping
// from guest physical to host user pages.
//
// SAFETY:
// Safe when the guest regions are guaranteed not to overlap.
unsafe fn set_user_memory_region(
    vm: &SafeDescriptor,
    slot: MemSlot,
    read_only: bool,
    guest_addr: u64,
    memory_size: u64,
    userspace_addr: *mut u8,
) -> Result<()> {
    let mut flags = 0;

    flags |= GH_MEM_ALLOW_READ | GH_MEM_ALLOW_EXEC;
    if !read_only {
        flags |= GH_MEM_ALLOW_WRITE;
    }

    let region = gh_userspace_memory_region {
        label: slot,
        flags,
        guest_phys_addr: guest_addr,
        memory_size,
        userspace_addr: userspace_addr as u64,
    };

    let ret = ioctl_with_ref(vm, GH_VM_SET_USER_MEM_REGION(), &region);
    if ret == 0 {
        Ok(())
    } else {
        errno_result()
    }
}

#[derive(PartialEq, Eq, Hash)]
pub struct GunyahIrqRoute {
    irq: u32,
    level: bool,
}

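/// A Gunyah-backed VM. Holds the VM fd plus the bookkeeping shared (via `Arc`) across clones:
/// registered memory regions, the pool of re-usable memory slots, and installed irq routes.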
pub struct GunyahVm {
    gh: Gunyah,
    vm: SafeDescriptor,
    guest_mem: GuestMemory,
    mem_regions: Arc<Mutex<BTreeMap<MemSlot, (Box<dyn MappedRegion>, GuestAddress)>>>,
    /// A min heap of MemSlot numbers that were used and then removed and can now be re-used
    mem_slot_gaps: Arc<Mutex<BinaryHeap<Reverse<MemSlot>>>>,
    routes: Arc<Mutex<HashSet<GunyahIrqRoute>>>,
    hv_cfg: crate::Config,
}

impl AsRawDescriptor for GunyahVm {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vm.as_raw_descriptor()
    }
}

impl GunyahVm {
    pub fn new(gh: &Gunyah, guest_mem: GuestMemory, cfg: Config) -> Result<GunyahVm> {
        // SAFETY:
        // Safe because we know gunyah is a real gunyah fd as this module is the only one that can
        // make Gunyah objects.
        let ret = unsafe { ioctl_with_val(gh, GH_CREATE_VM(), 0 as c_ulong) };
        if ret < 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we verify that ret is valid and we own the fd.
        let vm_descriptor = unsafe { SafeDescriptor::from_raw_descriptor(ret) };
        for region in guest_mem.regions() {
            let lend = if cfg.protection_type.isolates_memory() {
                match region.options.purpose {
                    MemoryRegionPurpose::GuestMemoryRegion => true,
                    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
                    MemoryRegionPurpose::ProtectedFirmwareRegion => true,
                    #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
                    MemoryRegionPurpose::StaticSwiotlbRegion => false,
                }
            } else {
                false
            };
            if lend {
                // SAFETY:
                // Safe because the guest regions are guaranteed not to overlap.
                unsafe {
                    android_lend_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        false,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                }
            } else {
                // SAFETY:
                // Safe because the guest regions are guaranteed not to overlap.
                unsafe {
                    set_user_memory_region(
                        &vm_descriptor,
                        region.index as MemSlot,
                        false,
                        region.guest_addr.offset(),
                        region.size.try_into().unwrap(),
                        region.host_addr as *mut u8,
                    )?;
                }
            }
        }

        Ok(GunyahVm {
            gh: gh.try_clone()?,
            vm: vm_descriptor,
            guest_mem,
            mem_regions: Arc::new(Mutex::new(BTreeMap::new())),
            mem_slot_gaps: Arc::new(Mutex::new(BinaryHeap::new())),
            routes: Arc::new(Mutex::new(HashSet::new())),
            hv_cfg: cfg,
        })
    }

    fn create_vcpu(&self, id: usize) -> Result<GunyahVcpu> {
        let gh_fn_vcpu_arg = gh_fn_vcpu_arg {
            id: id.try_into().unwrap(),
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_VCPU,
            arg_size: size_of::<gh_fn_vcpu_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_vcpu_arg as *const gh_fn_vcpu_arg as u64,
        };


        // SAFETY:
        // Safe because we know that our file is a VM fd and we verify the return result.
        let fd = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) };
        if fd < 0 {
            return errno_result();
        }

        // SAFETY:
        // Wrap the vcpu now in case the following ? returns early. This is safe because we verified
        // the value of the fd and we own the fd.
        let vcpu = unsafe { SafeDescriptor::from_raw_descriptor(fd) };

        // SAFETY:
        // Safe because we know this is a Gunyah VCPU
        let res = unsafe { ioctl(&vcpu, GH_VCPU_MMAP_SIZE()) };
        if res < 0 {
            return errno_result();
        }
        let run_mmap_size = res as usize;

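        // Map the shared vcpu run structure (the Gunyah analogue of KVM's `kvm_run`); the
        // kernel reported the required mapping size just above.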
        let run_mmap = MemoryMappingBuilder::new(run_mmap_size)
            .from_descriptor(&vcpu)
            .build()
            .map_err(|_| Error::new(ENOSPC))?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id,
            run_mmap: Arc::new(run_mmap),
        })
    }

    pub fn register_irqfd(&self, label: u32, evt: &Event, level: bool) -> Result<()> {
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            fd: evt.as_raw_descriptor() as u32,
            label,
            flags: if level { GH_IRQFD_LEVEL } else { 0 },
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) };
        if ret == 0 {
            self.routes
                .lock()
                .insert(GunyahIrqRoute { irq: label, level });
            Ok(())
        } else {
            errno_result()
        }
    }

    pub fn unregister_irqfd(&self, label: u32, _evt: &Event) -> Result<()> {
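        // Only the label is filled in below: removal appears to be keyed on the function's
        // label rather than the event fd (hence `_evt` is unused). This is an inference from
        // the code; the Gunyah UAPI defines the exact matching rules.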
        let gh_fn_irqfd_arg = gh_fn_irqfd_arg {
            label,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IRQFD,
            arg_size: size_of::<gh_fn_irqfd_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_irqfd_arg as *const gh_fn_irqfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION(), &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    pub fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

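    /// Tells Gunyah where the device tree blob lives in guest memory. Presumably this must be
    /// configured before `GH_VM_START`; the kernel driver documentation is authoritative here.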
    fn set_dtb_config(&self, fdt_address: GuestAddress, fdt_size: usize) -> Result<()> {
        let dtb_config = gh_vm_dtb_config {
            guest_phys_addr: fdt_address.offset(),
            size: fdt_size.try_into().unwrap(),
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_SET_DTB_CONFIG(), &dtb_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn set_protected_vm_firmware_ipa(&self, fw_addr: GuestAddress, fw_size: u64) -> Result<()> {
        let fw_config = gh_vm_firmware_config {
            guest_phys_addr: fw_addr.offset(),
            size: fw_size,
        };

        // SAFETY:
        // Safe because we know this is a Gunyah VM
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ANDROID_SET_FW_CONFIG(), &fw_config) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn start(&self) -> Result<()> {
        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl(self, GH_VM_START()) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }
}

impl Vm for GunyahVm {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        Ok(GunyahVm {
            gh: self.gh.try_clone()?,
            vm: self.vm.try_clone()?,
            guest_mem: self.guest_mem.clone(),
            mem_regions: self.mem_regions.clone(),
            mem_slot_gaps: self.mem_slot_gaps.clone(),
            routes: self.routes.clone(),
            hv_cfg: self.hv_cfg,
        })
    }

    fn check_capability(&self, c: VmCap) -> bool {
        match c {
            VmCap::DirtyLog => false,
            // Strictly speaking, Gunyah supports pvclock, but Gunyah takes care
            // of it and crosvm doesn't need to do anything for it
            VmCap::PvClock => false,
            VmCap::Protected => true,
            VmCap::EarlyInitCpuid => false,
            #[cfg(target_arch = "x86_64")]
            VmCap::BusLockDetect => false,
            VmCap::ReadOnlyMemoryRegion => false,
            VmCap::MemNoncoherentDma => false,
        }
    }

    fn get_guest_phys_addr_bits(&self) -> u8 {
        40
    }

    fn get_memory(&self) -> &GuestMemory {
        &self.guest_mem
    }

    fn add_memory_region(
        &mut self,
        guest_addr: GuestAddress,
        mem_region: Box<dyn MappedRegion>,
        read_only: bool,
        _log_dirty_pages: bool,
        _cache: MemCacheType,
    ) -> Result<MemSlot> {
        let pgsz = pagesize() as u64;
        // Gunyah requires the user memory region size to be page-size aligned. Rounding
        // mem_region.size() up to the next page boundary is safe because mmap rounds the
        // mapping size up to page alignment anyway.
        let size = (mem_region.size() as u64 + pgsz - 1) / pgsz * pgsz;
        let end_addr = guest_addr.checked_add(size).ok_or(Error::new(EOVERFLOW))?;

        if self.guest_mem.range_overlap(guest_addr, end_addr) {
            return Err(Error::new(ENOSPC));
        }

        let mut regions = self.mem_regions.lock();
        let mut gaps = self.mem_slot_gaps.lock();
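        // Reuse the smallest freed slot if any (the heap stores `Reverse` keys, making it a
        // min-heap); otherwise allocate the next slot number after the boot-time regions that
        // GunyahVm::new() already registered.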
        let slot = match gaps.pop() {
            Some(gap) => gap.0,
            None => (regions.len() + self.guest_mem.num_regions() as usize) as MemSlot,
        };

        // SAFETY: safe because we checked above that the new region does not overlap guest
        // memory, and the return value is checked.
        let res = unsafe {
            set_user_memory_region(
                &self.vm,
                slot,
                read_only,
                guest_addr.offset(),
                size,
                mem_region.as_ptr(),
            )
        };

        if let Err(e) = res {
            gaps.push(Reverse(slot));
            return Err(e);
        }
        regions.insert(slot, (mem_region, guest_addr));
        Ok(slot)
    }

    fn msync_memory_region(&mut self, slot: MemSlot, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (mem, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(ENOENT))?;

        mem.msync(offset, size).map_err(|err| match err {
            MmapError::InvalidAddress => Error::new(EFAULT),
            MmapError::NotPageAligned => Error::new(EINVAL),
            MmapError::SystemCallFailed(e) => e,
            _ => Error::new(EIO),
        })
    }

    fn remove_memory_region(&mut self, _slot: MemSlot) -> Result<Box<dyn MappedRegion>> {
        unimplemented!()
    }

    fn create_device(&self, _kind: DeviceKind) -> Result<SafeDescriptor> {
        unimplemented!()
    }

    fn get_dirty_log(&self, _slot: MemSlot, _dirty_log: &mut [u8]) -> Result<()> {
        unimplemented!()
    }

    fn register_ioevent(
        &mut self,
        evt: &Event,
        addr: IoEventAddress,
        datamatch: Datamatch,
    ) -> Result<()> {
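        // Flatten the datamatch request into (match?, value, length): `Some(v)` means the
        // guest's write must equal `v` over `length` bytes to trigger the event; `None`
        // matches any value of that length.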
        let (do_datamatch, datamatch_value, datamatch_len) = match datamatch {
            Datamatch::AnyLength => (false, 0, 0),
            Datamatch::U8(v) => match v {
                Some(u) => (true, u as u64, 1),
                None => (false, 0, 1),
            },
            Datamatch::U16(v) => match v {
                Some(u) => (true, u as u64, 2),
                None => (false, 0, 2),
            },
            Datamatch::U32(v) => match v {
                Some(u) => (true, u as u64, 4),
                None => (false, 0, 4),
            },
            Datamatch::U64(v) => match v {
                Some(u) => (true, u, 8),
                None => (false, 0, 8),
            },
        };

        let mut flags = 0;
        if do_datamatch {
            flags |= 1 << GH_IOEVENTFD_DATAMATCH;
        }

        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            fd: evt.as_raw_descriptor(),
            datamatch: datamatch_value,
            len: datamatch_len,
            addr: maddr,
            flags,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_ADD_FUNCTION(), &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn unregister_ioevent(
        &mut self,
        _evt: &Event,
        addr: IoEventAddress,
        _datamatch: Datamatch,
    ) -> Result<()> {
        let maddr = if let IoEventAddress::Mmio(maddr) = addr {
            maddr
        } else {
            todo!()
        };

        let gh_fn_ioeventfd_arg = gh_fn_ioeventfd_arg {
            addr: maddr,
            ..Default::default()
        };

        let function_desc = gh_fn_desc {
            type_: GH_FN_IOEVENTFD,
            arg_size: size_of::<gh_fn_ioeventfd_arg>() as u32,
            // SAFETY:
            // Safe because kernel is expecting pointer with non-zero arg_size
            arg: &gh_fn_ioeventfd_arg as *const gh_fn_ioeventfd_arg as u64,
        };

        // SAFETY: safe because memory is not modified and the return value is checked.
        let ret = unsafe { ioctl_with_ref(self, GH_VM_REMOVE_FUNCTION(), &function_desc) };
        if ret == 0 {
            Ok(())
        } else {
            errno_result()
        }
    }

    fn handle_io_events(&self, _addr: IoEventAddress, _data: &[u8]) -> Result<()> {
        Ok(())
    }

    fn get_pvclock(&self) -> Result<ClockState> {
        unimplemented!()
    }

    fn set_pvclock(&self, _state: &ClockState) -> Result<()> {
        unimplemented!()
    }

    fn add_fd_mapping(
        &mut self,
        slot: u32,
        offset: usize,
        size: usize,
        fd: &dyn AsRawDescriptor,
        fd_offset: u64,
        prot: Protection,
    ) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.add_fd_mapping(offset, size, fd, fd_offset, prot) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn remove_mapping(&mut self, slot: u32, offset: usize, size: usize) -> Result<()> {
        let mut regions = self.mem_regions.lock();
        let (region, _) = regions.get_mut(&slot).ok_or_else(|| Error::new(EINVAL))?;

        match region.remove_mapping(offset, size) {
            Ok(()) => Ok(()),
            Err(MmapError::SystemCallFailed(e)) => Err(e),
            Err(_) => Err(Error::new(EIO)),
        }
    }

    fn handle_balloon_event(&mut self, _event: BalloonEvent) -> Result<()> {
        unimplemented!()
    }
}

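// Exit types reported in `gh_vm_exit_info` when the VM stops. The names and values appear to
// mirror the Gunyah resource manager's exit codes; the mapping to crosvm `VcpuExit` variants
// happens in `run()` below.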
const GH_RM_EXIT_TYPE_VM_EXIT: u16 = 0;
const GH_RM_EXIT_TYPE_PSCI_POWER_OFF: u16 = 1;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET: u16 = 2;
const GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2: u16 = 3;
const GH_RM_EXIT_TYPE_WDT_BITE: u16 = 4;
const GH_RM_EXIT_TYPE_HYP_ERROR: u16 = 5;
const GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT: u16 = 6;
const GH_RM_EXIT_TYPE_VM_FORCE_STOPPED: u16 = 7;

pub struct GunyahVcpu {
    vm: SafeDescriptor,
    vcpu: SafeDescriptor,
    id: usize,
    run_mmap: Arc<MemoryMapping>,
}

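/// Handle used to kick a vcpu out of `GH_VCPU_RUN` from another thread: it flips the
/// `immediate_exit` flag in the shared vcpu run page so that the run loop returns to
/// userspace promptly.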
struct GunyahVcpuSignalHandle {
    run_mmap: Arc<MemoryMapping>,
}

impl VcpuSignalHandleInner for GunyahVcpuSignalHandle {
    fn signal_immediate_exit(&self) {
        // SAFETY: we ensure `run_mmap` is a valid mapping of `gh_vcpu_run` at creation time,
        // and the `Arc` ensures the mapping still exists while we hold a reference to it.
        unsafe {
            let run = self.run_mmap.as_ptr() as *mut gh_vcpu_run;
            (*run).immediate_exit = 1;
        }
    }
}

impl AsRawDescriptor for GunyahVcpu {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.vcpu.as_raw_descriptor()
    }
}

impl Vcpu for GunyahVcpu {
    fn try_clone(&self) -> Result<Self>
    where
        Self: Sized,
    {
        let vcpu = self.vcpu.try_clone()?;

        Ok(GunyahVcpu {
            vm: self.vm.try_clone()?,
            vcpu,
            id: self.id,
            run_mmap: self.run_mmap.clone(),
        })
    }

    fn as_vcpu(&self) -> &dyn Vcpu {
        self
    }

    fn run(&mut self) -> Result<VcpuExit> {
        // SAFETY:
        // Safe because we know our file is a VCPU fd and we verify the return result.
        let ret = unsafe { ioctl(self, GH_VCPU_RUN()) };
        if ret != 0 {
            return errno_result();
        }

        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct
        // because the kernel told us how large it is.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        match run.exit_reason {
            GH_VCPU_EXIT_MMIO => Ok(VcpuExit::Mmio),
            GH_VCPU_EXIT_STATUS => {
                // SAFETY:
                // Safe because the exit_reason (which comes from the kernel) told us which
                // union field to use.
                let status = unsafe { &mut run.__bindgen_anon_1.status };
                match status.status {
                    GH_VM_STATUS_GH_VM_STATUS_LOAD_FAILED => Ok(VcpuExit::FailEntry {
                        hardware_entry_failure_reason: 0,
                    }),
                    GH_VM_STATUS_GH_VM_STATUS_CRASHED => Ok(VcpuExit::SystemEventCrash),
                    GH_VM_STATUS_GH_VM_STATUS_EXITED => {
                        info!("exit type {}", status.exit_info.type_);
                        match status.exit_info.type_ {
                            GH_RM_EXIT_TYPE_VM_EXIT => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_POWER_OFF => Ok(VcpuExit::SystemEventShutdown),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_PSCI_SYSTEM_RESET2 => Ok(VcpuExit::SystemEventReset),
                            GH_RM_EXIT_TYPE_WDT_BITE => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_HYP_ERROR => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_ASYNC_EXT_ABORT => Ok(VcpuExit::SystemEventCrash),
                            GH_RM_EXIT_TYPE_VM_FORCE_STOPPED => Ok(VcpuExit::SystemEventShutdown),
                            r => {
                                warn!("Unknown exit type: {}", r);
                                Err(Error::new(EINVAL))
                            }
                        }
                    }
                    r => {
                        warn!("Unknown vm status: {}", r);
                        Err(Error::new(EINVAL))
                    }
                }
            }
            r => {
                warn!("unknown gh exit reason: {}", r);
                Err(Error::new(EINVAL))
            }
        }
    }

    fn id(&self) -> usize {
        self.id
    }

    fn set_immediate_exit(&self, exit: bool) {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because
        // the kernel told us how large it was. The pointer is page aligned, so casting it to a
        // different type is well defined.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        run.immediate_exit = exit.into();
    }

    fn signal_handle(&self) -> VcpuSignalHandle {
        VcpuSignalHandle {
            inner: Box::new(GunyahVcpuSignalHandle {
                run_mmap: self.run_mmap.clone(),
            }),
        }
    }

    fn handle_mmio(&self, handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> {
        // SAFETY:
        // Safe because we know we mapped enough memory to hold the gh_vcpu_run struct because
        // the kernel told us how large it was. The pointer is page aligned so casting to a
        // different type is well defined.
        let run = unsafe { &mut *(self.run_mmap.as_ptr() as *mut gh_vcpu_run) };
        // Verify that the handler is called in the right context.
        assert!(run.exit_reason == GH_VCPU_EXIT_MMIO);
        // SAFETY:
        // Safe because the exit_reason (which comes from the kernel) told us which
        // union field to use.
        let mmio = unsafe { &mut run.__bindgen_anon_1.mmio };
        let address = mmio.phys_addr;
        let size = min(mmio.len as usize, mmio.data.len());
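        // Writes hand the guest's bytes to the handler; reads ask the handler for bytes and
        // copy them back into the run page so the kernel can complete the guest's access.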
        if mmio.is_write != 0 {
            handle_fn(IoParams {
                address,
                size,
                operation: IoOperation::Write { data: mmio.data },
            });
            Ok(())
        } else if let Some(data) = handle_fn(IoParams {
            address,
            size,
            operation: IoOperation::Read,
        }) {
            mmio.data[..size].copy_from_slice(&data[..size]);
            Ok(())
        } else {
            Err(Error::new(EINVAL))
        }
    }

    fn handle_io(&self, _handle_fn: &mut dyn FnMut(IoParams) -> Option<[u8; 8]>) -> Result<()> {
        unreachable!()
    }

    fn handle_hyperv_hypercall(&self, _func: &mut dyn FnMut(HypervHypercall) -> u64) -> Result<()> {
        unreachable!()
    }

    fn handle_rdmsr(&self, _data: u64) -> Result<()> {
        unreachable!()
    }

    fn handle_wrmsr(&self) {
        unreachable!()
    }

    fn on_suspend(&self) -> Result<()> {
        Ok(())
    }

    unsafe fn enable_raw_capability(&self, _cap: u32, _args: &[u64; 4]) -> Result<()> {
        unimplemented!()
    }
}