// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fs::File;
use std::sync::Arc;
use std::u32;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
#[cfg(any(target_os = "android", target_os = "linux"))]
use base::linux::MemoryMappingBuilderUnix;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
#[cfg(windows)]
use base::MemoryMappingBuilderWindows;
use base::Protection;
use base::RawDescriptor;
use hypervisor::MemCacheType;
use hypervisor::Vm;
use resources::SystemAllocator;
use vfio_sys::*;
use vm_control::api::VmMemoryClient;
use vm_control::VmMemoryDestination;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;

use crate::pci::CrosvmDeviceId;
use crate::vfio::VfioDevice;
use crate::vfio::VfioError;
use crate::vfio::VfioIrq;
use crate::BusAccessInfo;
use crate::BusDevice;
use crate::BusDeviceObj;
use crate::DeviceId;
use crate::IommuDevType;
use crate::IrqEdgeEvent;
use crate::IrqLevelEvent;
use crate::Suspendable;

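/// Region of guest MMIO space occupied by one VFIO device region: the VFIO
/// region `index` plus the guest physical address range `[start, start + length)`.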
struct MmioInfo {
    index: usize,
    start: u64,
    length: u64,
}

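/// A VFIO platform (non-PCI) device exposed to the guest as an MMIO bus device.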
pub struct VfioPlatformDevice {
    device: Arc<VfioDevice>,
    interrupt_edge_evt: Vec<IrqEdgeEvent>,
    interrupt_level_evt: Vec<IrqLevelEvent>,
    mmio_regions: Vec<MmioInfo>,
    vm_memory_client: VmMemoryClient,
    // Scratch MemoryMappings, kept to avoid unmapping the regions before VM exit.
    mem: Vec<MemoryMapping>,
}

impl BusDevice for VfioPlatformDevice {
    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::VfioPlatformDevice.into()
    }

    fn debug_label(&self) -> String {
        format!("vfio {} device", self.device.device_name())
    }

    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        self.read_mmio(info.address, data)
    }

    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        self.write_mmio(info.address, data)
    }
}

impl Suspendable for VfioPlatformDevice {}

impl BusDeviceObj for VfioPlatformDevice {
    fn as_platform_device(&self) -> Option<&VfioPlatformDevice> {
        Some(self)
    }
    fn as_platform_device_mut(&mut self) -> Option<&mut VfioPlatformDevice> {
        Some(self)
    }
    fn into_platform_device(self: Box<Self>) -> Option<Box<VfioPlatformDevice>> {
        Some(self)
    }
}

impl VfioPlatformDevice {
    /// Constructs a new `VfioPlatformDevice` for the given VFIO device.
    pub fn new(device: VfioDevice, vm_memory_client: VmMemoryClient) -> Self {
        let dev = Arc::new(device);
        VfioPlatformDevice {
            device: dev,
            interrupt_edge_evt: Vec::new(),
            interrupt_level_evt: Vec::new(),
            mmio_regions: Vec::new(),
            vm_memory_client,
            mem: Vec::new(),
        }
    }

    pub fn get_platform_irqs(&self) -> Result<Vec<VfioIrq>, VfioError> {
        self.device.get_irqs()
    }

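    /// Returns true if the kernel auto-masks this IRQ when it fires
    /// (`VFIO_IRQ_INFO_AUTOMASKED`), as is typical for level-triggered
    /// interrupts that must be explicitly unmasked after each assertion.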
    pub fn irq_is_automask(&self, irq: &VfioIrq) -> bool {
        irq.flags & VFIO_IRQ_INFO_AUTOMASKED != 0
    }

    fn setup_irq_resample(&mut self, resample_evt: &Event, index: u32) -> Result<()> {
        self.device.irq_mask(index).context("Intx mask failed")?;
        self.device
            .resample_virq_enable(resample_evt, index)
            .context("resample enable failed")?;
        self.device
            .irq_unmask(index)
            .context("Intx unmask failed")?;
        Ok(())
    }

    pub fn assign_edge_platform_irq(&mut self, irq_evt: &IrqEdgeEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        self.interrupt_edge_evt.push(interrupt_evt);
        Ok(())
    }

    pub fn assign_level_platform_irq(&mut self, irq_evt: &IrqLevelEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        if let Err(e) = self.setup_irq_resample(interrupt_evt.get_resample(), index) {
            self.disable_irqs(index);
            bail!("failed to set up irq resampling: {}", e);
        }
        self.interrupt_level_evt.push(interrupt_evt);
        Ok(())
    }

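    // Returns a copy of the MmioInfo entry whose guest address range contains `addr`, if any.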
    fn find_region(&self, addr: u64) -> Option<MmioInfo> {
        for mmio_info in self.mmio_regions.iter() {
            if addr >= mmio_info.start && addr < mmio_info.start + mmio_info.length {
                return Some(MmioInfo {
                    index: mmio_info.index,
                    start: mmio_info.start,
                    length: mmio_info.length,
                });
            }
        }
        None
    }

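    /// Allocates a page-aligned guest MMIO range for every region of the
    /// device and returns the `(start, size)` pairs that were reserved.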
    pub fn allocate_regions(
        &mut self,
        resources: &mut SystemAllocator,
    ) -> Result<Vec<(u64, u64)>, resources::Error> {
        let mut ranges = Vec::new();
        for i in 0..self.device.get_region_count() {
            let size = self.device.get_region_size(i);
            let alloc_id = resources.get_anon_alloc();
            let allocator = resources
                .mmio_platform_allocator()
                .ok_or(resources::Error::MissingPlatformMMIOAddresses)?;
            let start_addr = allocator.allocate_with_align(
                size,
                alloc_id,
                "vfio_mmio".to_string(),
                pagesize() as u64,
            )?;
            ranges.push((start_addr, size));

            self.mmio_regions.push(MmioInfo {
                index: i,
                start: start_addr,
                length: size,
            });
        }
        Ok(ranges)
    }

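    // Maps one mappable region straight into the guest address space with
    // `Vm::add_memory_region`; unlike `region_mmap`, this does not go through
    // the `VmMemoryClient` and can therefore run before the VM is started.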
    fn region_mmap_early(&self, vm: &mut impl Vm, index: usize, start_addr: u64) {
        if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP == 0 {
            return;
        }

        for mmap in &self.device.get_region_mmap(index) {
            let mmap_offset = mmap.offset;
            let mmap_size = mmap.size;
            let guest_map_start = start_addr + mmap_offset;
            let region_offset = self.device.get_region_offset(index);
            let offset = region_offset + mmap_offset;

            let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
                .from_descriptor(self.device.device_file())
                .offset(offset)
                .build()
            {
                Ok(v) => v,
                Err(e) => {
                    error!("{e}, index: {index}, start_addr:{start_addr:#x}, offset:{offset:#x}");
                    break;
                }
            };

            let host = mmap.as_ptr();
            let guest_addr = GuestAddress(guest_map_start);
            if let Err(e) = vm.add_memory_region(
                guest_addr,
                Box::new(mmap),
                false,
                false,
                MemCacheType::CacheCoherent,
            ) {
                error!("{e}, index: {index}, guest_addr:{guest_addr}, host:{host:?}");
                break;
            }
        }
    }

    /// Force adding the MMIO regions to the guest memory space.
    ///
    /// By default, MMIO regions are mapped lazily when the guest first accesses them. Instead,
    /// this function maps them, even if the guest might end up not accessing them. It only runs in
    /// the current thread and can therefore be called before the VM is started.
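    ///
    /// A minimal call-order sketch (an `ignore`d doctest; the device, resource
    /// allocator, and VM setup are elided and the variable names are illustrative):
    ///
    /// ```ignore
    /// let mut platform_dev = VfioPlatformDevice::new(vfio_device, vm_memory_client);
    /// // Reserve guest MMIO ranges first, then map them eagerly before boot.
    /// let _ranges = platform_dev.allocate_regions(&mut resources)?;
    /// platform_dev.regions_mmap_early(&mut vm);
    /// ```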
    pub fn regions_mmap_early(&mut self, vm: &mut impl Vm) {
        for mmio_info in self.mmio_regions.iter() {
            self.region_mmap_early(vm, mmio_info.index, mmio_info.start);
        }
    }

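    // Registers one mappable region with the VM via `VmMemoryClient` and also
    // maps it locally so that `vfio_dma_map()` can use the host address from
    // this process; returns the local mappings that must be kept alive.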
    fn region_mmap(&self, index: usize, start_addr: u64) -> Vec<MemoryMapping> {
        let mut mem_map: Vec<MemoryMapping> = Vec::new();
        if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP != 0 {
            let mmaps = self.device.get_region_mmap(index);
            if mmaps.is_empty() {
                return mem_map;
            }

            for mmap in mmaps.iter() {
                let mmap_offset = mmap.offset;
                let mmap_size = mmap.size;
                let guest_map_start = start_addr + mmap_offset;
                let region_offset = self.device.get_region_offset(index);
                let offset = region_offset + mmap_offset;
                let descriptor = match self.device.device_file().try_clone() {
                    Ok(device_file) => device_file.into(),
                    Err(_) => break,
                };
                match self.vm_memory_client.register_memory(
                    VmMemorySource::Descriptor {
                        descriptor,
                        offset,
                        size: mmap_size,
                    },
                    VmMemoryDestination::GuestPhysicalAddress(guest_map_start),
                    Protection::read_write(),
                    MemCacheType::CacheCoherent,
                ) {
                    Ok(_region) => {
                        // The VM has already mapped this region, but only in the
                        // main process; vfio_dma_map() needs the mapping in the
                        // device process, so map it again here.
                        let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
                            .from_file(self.device.device_file())
                            .offset(offset)
                            .build()
                        {
                            Ok(v) => v,
                            Err(_e) => break,
                        };
                        let host = mmap.as_ptr() as u64;
                        // SAFETY:
                        // Safe because the given guest_map_start is a valid guest BAR address, and
                        // the host pointer is correct and valid, as guaranteed by the MemoryMapping
                        // interface.
                        match unsafe {
                            self.device
                                .vfio_dma_map(guest_map_start, mmap_size, host, true)
                        } {
                            Ok(_) => mem_map.push(mmap),
                            Err(e) => {
                                error!(
                                    "{}, index: {}, start_addr:0x{:x}, host:0x{:x}",
                                    e, index, start_addr, host
                                );
                                break;
                            }
                        }
                    }
                    Err(e) => {
                        error!("register_memory failed: {}", e);
                        break;
                    }
                }
            }
        }

        mem_map
    }

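    // Maps all mappable regions; invoked from the MMIO handlers, so it runs on
    // the first trapped guest access (later accesses no longer trap once the
    // regions are mapped).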
    fn regions_mmap(&mut self) {
        for mmio_info in self.mmio_regions.iter() {
            let mut mem_map = self.region_mmap(mmio_info.index, mmio_info.start);
            self.mem.append(&mut mem_map);
        }
    }

    fn disable_irqs(&mut self, index: u32) {
        if let Err(e) = self.device.irq_disable(index) {
            error!("Platform irq disable failed: {}", e);
        }
    }

    fn read_mmio(&mut self, addr: u64, data: &mut [u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_read(index, data, offset);
        }
        // There is no way to know when the guest will first access a region, so
        // wait for the first trapped access and mmap then; subsequent accesses
        // are dual-stage MMU accelerated.
        self.regions_mmap();
    }

    fn write_mmio(&mut self, addr: u64, data: &[u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_write(index, data, offset);
        }
        // There is no way to know when the guest will first access a region, so
        // wait for the first trapped access and mmap then; subsequent accesses
        // are dual-stage MMU accelerated.
        self.regions_mmap();
    }

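    /// Returns the raw descriptors that must be kept open while the device is in use.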
    pub fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut rds = self.device.keep_rds();

        for irq_evt in self.interrupt_edge_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        for irq_evt in self.interrupt_level_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        rds.push(self.vm_memory_client.as_raw_descriptor());
        rds
    }

    /// Gets the `File` backing this VFIO device.
    pub fn device_file(&self) -> &File {
        self.device.device_file()
    }

    /// Returns the DT symbol (node label) of the VFIO device.
    pub fn dt_symbol(&self) -> Option<&str> {
        self.device.dt_symbol()
    }

    /// Returns the type and identifier (if applicable) of the IOMMU used by this VFIO device and
    /// its master IDs.
    pub fn iommu(&self) -> Option<(IommuDevType, Option<u32>, &[u32])> {
        self.device.iommu()
    }
}