// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::fs::File;
use std::sync::Arc;
use std::u32;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::pagesize;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::MappedRegion;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::Protection;
use base::RawDescriptor;
use base::Tube;
use resources::SystemAllocator;
use vfio_sys::*;
use vm_control::VmMemoryDestination;
use vm_control::VmMemoryRequest;
use vm_control::VmMemoryResponse;
use vm_control::VmMemorySource;

use crate::pci::CrosvmDeviceId;
use crate::vfio::VfioDevice;
use crate::vfio::VfioError;
use crate::vfio::VfioIrq;
use crate::BusAccessInfo;
use crate::BusDevice;
use crate::BusDeviceObj;
use crate::DeviceId;
use crate::IrqEdgeEvent;
use crate::IrqLevelEvent;
use crate::Suspendable;

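/// One MMIO region of the device: the VFIO region index and the guest
/// physical address range allocated for it.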
struct MmioInfo {
    index: u32,
    start: u64,
    length: u64,
}

pub struct VfioPlatformDevice {
    device: Arc<VfioDevice>,
    interrupt_edge_evt: Vec<IrqEdgeEvent>,
    interrupt_level_evt: Vec<IrqLevelEvent>,
    mmio_regions: Vec<MmioInfo>,
    vm_socket_mem: Tube,
    // Scratch MemoryMapping to avoid unmapping before VM exit.
    mem: Vec<MemoryMapping>,
}

impl BusDevice for VfioPlatformDevice {
    fn device_id(&self) -> DeviceId {
        CrosvmDeviceId::VfioPlatformDevice.into()
    }

    fn debug_label(&self) -> String {
        format!("vfio {} device", self.device.device_name())
    }

    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        self.read_mmio(info.address, data)
    }

    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        self.write_mmio(info.address, data)
    }
}

impl Suspendable for VfioPlatformDevice {}

impl BusDeviceObj for VfioPlatformDevice {
    fn as_platform_device(&self) -> Option<&VfioPlatformDevice> {
        Some(self)
    }
    fn as_platform_device_mut(&mut self) -> Option<&mut VfioPlatformDevice> {
        Some(self)
    }
    fn into_platform_device(self: Box<Self>) -> Option<Box<VfioPlatformDevice>> {
        Some(self)
    }
}

impl VfioPlatformDevice {
    /// Constructs a new `VfioPlatformDevice` for the given VFIO device.
    pub fn new(device: VfioDevice, vfio_device_socket_mem: Tube) -> Self {
        let dev = Arc::new(device);
        VfioPlatformDevice {
            device: dev,
            interrupt_edge_evt: Vec::new(),
            interrupt_level_evt: Vec::new(),
            mmio_regions: Vec::new(),
            vm_socket_mem: vfio_device_socket_mem,
            mem: Vec::new(),
        }
    }

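    /// Returns the list of IRQs reported by the underlying VFIO device.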
    pub fn get_platform_irqs(&self) -> Result<Vec<VfioIrq>, VfioError> {
        self.device.get_irqs()
    }

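    /// Returns true if the given IRQ is automasked, i.e. the kernel masks it
    /// when it fires and it must be unmasked before it can fire again.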
    pub fn irq_is_automask(&self, irq: &VfioIrq) -> bool {
        irq.flags & VFIO_IRQ_INFO_AUTOMASKED != 0
    }

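    // Registers `resample_evt` so the IRQ at `index` can be unmasked again
    // after it fires. The IRQ is kept masked while the resample eventfd is
    // wired up.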
    fn setup_irq_resample(&mut self, resample_evt: &Event, index: u32) -> Result<()> {
        self.device.irq_mask(index).context("Intx mask failed")?;
        self.device
            .resample_virq_enable(resample_evt, index)
            .context("resample enable failed")?;
        self.device
            .irq_unmask(index)
            .context("Intx unmask failed")?;
        Ok(())
    }

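    /// Enables the edge-triggered platform IRQ at `index`, signaled through
    /// the trigger eventfd of `irq_evt`.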
    pub fn assign_edge_platform_irq(&mut self, irq_evt: &IrqEdgeEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        self.interrupt_edge_evt.push(interrupt_evt);
        Ok(())
    }

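    /// Enables the level-triggered platform IRQ at `index`, wiring up both
    /// the trigger and resample eventfds of `irq_evt`. The IRQ is disabled
    /// again if resample setup fails.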
    pub fn assign_level_platform_irq(&mut self, irq_evt: &IrqLevelEvent, index: u32) -> Result<()> {
        let interrupt_evt = irq_evt.try_clone().context("failed to clone irq event")?;
        self.device
            .irq_enable(&[Some(interrupt_evt.get_trigger())], index, 0)
            .context("platform irq enable failed")?;
        if let Err(e) = self.setup_irq_resample(interrupt_evt.get_resample(), index) {
            self.disable_irqs(index);
            bail!("failed to set up irq resampling: {}", e);
        }
        self.interrupt_level_evt.push(interrupt_evt);
        Ok(())
    }

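    // Returns a copy of the MmioInfo for the region containing guest address
    // `addr`, if any.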
    fn find_region(&self, addr: u64) -> Option<MmioInfo> {
        for mmio_info in self.mmio_regions.iter() {
            if addr >= mmio_info.start && addr < mmio_info.start + mmio_info.length {
                return Some(MmioInfo {
                    index: mmio_info.index,
                    start: mmio_info.start,
                    length: mmio_info.length,
                });
            }
        }
        None
    }

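    /// Allocates a page-aligned guest MMIO range for each region of the
    /// device, records it in `mmio_regions`, and returns the allocated
    /// `(start, size)` pairs.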
    pub fn allocate_regions(
        &mut self,
        resources: &mut SystemAllocator,
    ) -> Result<Vec<(u64, u64)>, resources::Error> {
        let mut ranges = Vec::new();
        for i in 0..self.device.get_region_count() {
            let size = self.device.get_region_size(i);
            let alloc_id = resources.get_anon_alloc();
            let allocator = resources
                .mmio_platform_allocator()
                .ok_or(resources::Error::MissingPlatformMMIOAddresses)?;
            let start_addr = allocator.allocate_with_align(
                size,
                alloc_id,
                "vfio_mmio".to_string(),
                pagesize() as u64,
            )?;
            ranges.push((start_addr, size));

            self.mmio_regions.push(MmioInfo {
                index: i,
                start: start_addr,
                length: size,
            });
        }
        Ok(ranges)
    }

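    // Maps the mmap-able chunks of region `index` at guest address
    // `start_addr`: each chunk is registered with the VM's guest memory map
    // and also mapped locally so it can be handed to vfio_dma_map(). Any
    // failure stops processing of the remaining chunks.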
    fn region_mmap(&self, index: u32, start_addr: u64) -> Vec<MemoryMapping> {
        let mut mem_map: Vec<MemoryMapping> = Vec::new();
        if self.device.get_region_flags(index) & VFIO_REGION_INFO_FLAG_MMAP != 0 {
            let mmaps = self.device.get_region_mmap(index);
            if mmaps.is_empty() {
                return mem_map;
            }

            for mmap in mmaps.iter() {
                let mmap_offset = mmap.offset;
                let mmap_size = mmap.size;
                let guest_map_start = start_addr + mmap_offset;
                let region_offset = self.device.get_region_offset(index);
                let offset = region_offset + mmap_offset;
                let descriptor = match self.device.device_file().try_clone() {
                    Ok(device_file) => device_file.into(),
                    Err(_) => break,
                };
                if self
                    .vm_socket_mem
                    .send(&VmMemoryRequest::RegisterMemory {
                        source: VmMemorySource::Descriptor {
                            descriptor,
                            offset,
                            size: mmap_size,
                        },
                        dest: VmMemoryDestination::GuestPhysicalAddress(guest_map_start),
                        prot: Protection::read_write(),
                    })
                    .is_err()
                {
                    break;
                }

                let response: VmMemoryResponse = match self.vm_socket_mem.recv() {
                    Ok(res) => res,
                    Err(_) => break,
                };
                match response {
                    VmMemoryResponse::Ok => {
                        // Even though the VM has already mapped this region, that mapping
                        // lives in the main VM process. vfio_dma_map() needs a mapping in
                        // the device process as well, so map it again here.
                        let mmap = match MemoryMappingBuilder::new(mmap_size as usize)
                            .from_file(self.device.device_file())
                            .offset(offset)
                            .build()
                        {
                            Ok(v) => v,
                            Err(_e) => break,
                        };
                        let host = mmap.as_ptr() as u64;
                        // Safe because guest_map_start is a valid guest address for this
                        // region, and the host pointer is guaranteed correct and valid by
                        // the MemoryMapping interface.
                        match unsafe {
                            self.device
                                .vfio_dma_map(guest_map_start, mmap_size, host, true)
                        } {
                            Ok(_) => mem_map.push(mmap),
                            Err(e) => {
                                error!(
                                    "{}, index: {}, start_addr:0x{:x}, host:0x{:x}",
                                    e, index, start_addr, host
                                );
                                break;
                            }
                        }
                    }
                    _ => break,
                }
            }
        }

        mem_map
    }

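    // Maps every allocated MMIO region and stashes the resulting mappings in
    // `self.mem` so they stay alive for the lifetime of the device.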
    fn regions_mmap(&mut self) {
        for mmio_info in self.mmio_regions.iter() {
            let mut mem_map = self.region_mmap(mmio_info.index, mmio_info.start);
            self.mem.append(&mut mem_map);
        }
    }

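    // Disables the platform IRQ at `index`, logging any error.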
    fn disable_irqs(&mut self, index: u32) {
        if let Err(e) = self.device.irq_disable(index) {
            error!("Platform irq disable failed: {}", e);
        }
    }

    fn read_mmio(&mut self, addr: u64, data: &mut [u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_read(index, data, offset);
        }
        // We have no way other than waiting for the first access to do the
        // mmap, so that subsequent accesses are dual-stage MMU accelerated.
        self.regions_mmap();
    }

    fn write_mmio(&mut self, addr: u64, data: &[u8]) {
        if let Some(mmio_info) = self.find_region(addr) {
            let offset = addr - mmio_info.start;
            let index = mmio_info.index;
            self.device.region_write(index, data, offset);
        }
        // We have no way other than waiting for the first access to do the
        // mmap, so that subsequent accesses are dual-stage MMU accelerated.
        self.regions_mmap();
    }

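    /// Returns every raw descriptor held by this device: those of the
    /// underlying VFIO device, the registered IRQ eventfds, and the VM
    /// memory tube.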
    pub fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut rds = self.device.keep_rds();

        for irq_evt in self.interrupt_edge_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        for irq_evt in self.interrupt_level_evt.iter() {
            rds.extend(irq_evt.as_raw_descriptors());
        }

        rds.push(self.vm_socket_mem.as_raw_descriptor());
        rds
    }

    /// Gets the vfio device backing `File`.
    pub fn device_file(&self) -> &File {
        self.device.device_file()
    }
}