• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //! x86-64 specific transports.
2 
3 mod cam;
4 mod hypercalls;
5 
6 use super::{
7     pci::{
8         bus::{ConfigurationAccess, DeviceFunction, PciRoot, PCI_CAP_ID_VNDR},
9         device_type, CommonCfg, VirtioCapabilityInfo, VirtioPciError, CAP_BAR_OFFSET,
10         CAP_BAR_OFFSET_OFFSET, CAP_LENGTH_OFFSET, CAP_NOTIFY_OFF_MULTIPLIER_OFFSET,
11         VIRTIO_PCI_CAP_COMMON_CFG, VIRTIO_PCI_CAP_DEVICE_CFG, VIRTIO_PCI_CAP_ISR_CFG,
12         VIRTIO_PCI_CAP_NOTIFY_CFG, VIRTIO_VENDOR_ID,
13     },
14     DeviceStatus, DeviceType, Transport,
15 };
16 use crate::{hal::PhysAddr, Error};
17 pub use cam::HypCam;
18 use hypercalls::HypIoRegion;
19 use zerocopy::{FromBytes, Immutable, IntoBytes};
20 
/// Reads the given field of the `CommonCfg` structure from the common configuration I/O region,
/// using `offset_of!` to compute the field's byte offset.
macro_rules! configread {
    ($common_cfg:expr, $field:ident) => {
        $common_cfg.read(core::mem::offset_of!(CommonCfg, $field))
    };
}
26 
/// Writes `$value` to the given field of the `CommonCfg` structure in the common configuration
/// I/O region, using `offset_of!` to compute the field's byte offset.
macro_rules! configwrite {
    ($common_cfg:expr, $field:ident, $value:expr) => {
        $common_cfg.write(core::mem::offset_of!(CommonCfg, $field), $value)
    };
}
32 
/// PCI transport for VirtIO using hypercalls implemented by the x86-64 pKVM hypervisor for IO BARs.
#[derive(Debug)]
pub struct HypPciTransport {
    /// The VirtIO device type, derived from the PCI device ID.
    device_type: DeviceType,
    /// The bus, device and function identifier for the VirtIO device.
    device_function: DeviceFunction,
    /// The common configuration structure within some BAR.
    common_cfg: HypIoRegion,
    /// The start of the queue notification region within some BAR.
    notify_region: HypIoRegion,
    /// Multiplier (in bytes) applied to a queue's `queue_notify_off` to locate that queue's
    /// notify address within `notify_region`.
    notify_off_multiplier: u32,
    /// The ISR status register within some BAR.
    isr_status: HypIoRegion,
    /// The VirtIO device-specific configuration within some BAR, if the device has one.
    config_space: Option<HypIoRegion>,
}
49 
impl HypPciTransport {
    /// Constructs a new x86-64 pKVM PCI VirtIO transport for the given device function on the given
    /// PCI root controller.
    ///
    /// Returns an error if the device's vendor ID is not the VirtIO vendor ID, if a required
    /// VirtIO PCI capability (common config, notify config or ISR config) is missing, or if any
    /// capability's BAR region is invalid.
    pub fn new<C: ConfigurationAccess>(
        root: &mut PciRoot<C>,
        device_function: DeviceFunction,
    ) -> Result<Self, VirtioPciError> {
        // Config-space word 0 packs the device ID in the high 16 bits and the vendor ID in the
        // low 16 bits.
        let device_vendor = root.configuration_access.read_word(device_function, 0);
        let device_id = (device_vendor >> 16) as u16;
        let vendor_id = device_vendor as u16;
        if vendor_id != VIRTIO_VENDOR_ID {
            return Err(VirtioPciError::InvalidVendorId(vendor_id));
        }
        let device_type = device_type(device_id);

        // Find the PCI capabilities we need.
        let mut common_cfg = None;
        let mut notify_cfg = None;
        let mut notify_off_multiplier = 0;
        let mut isr_cfg = None;
        let mut device_cfg = None;
        for capability in root.capabilities(device_function) {
            // VirtIO configuration structures are advertised via vendor-specific capabilities.
            if capability.id != PCI_CAP_ID_VNDR {
                continue;
            }
            let cap_len = capability.private_header as u8;
            let cfg_type = (capability.private_header >> 8) as u8;
            // A valid virtio_pci_cap is at least 16 bytes; skip anything shorter.
            if cap_len < 16 {
                continue;
            }
            let struct_info = VirtioCapabilityInfo {
                bar: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET)
                    as u8,
                offset: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET_OFFSET),
                length: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_LENGTH_OFFSET),
            };

            // Only the first capability of each type is used (the `is_none` guards); later
            // duplicates are ignored.
            match cfg_type {
                VIRTIO_PCI_CAP_COMMON_CFG if common_cfg.is_none() => {
                    common_cfg = Some(struct_info);
                }
                // The notify capability carries an extra 4-byte notify_off_multiplier field, so
                // it must be at least 20 bytes long.
                VIRTIO_PCI_CAP_NOTIFY_CFG if cap_len >= 20 && notify_cfg.is_none() => {
                    notify_cfg = Some(struct_info);
                    notify_off_multiplier = root.configuration_access.read_word(
                        device_function,
                        capability.offset + CAP_NOTIFY_OFF_MULTIPLIER_OFFSET,
                    );
                }
                VIRTIO_PCI_CAP_ISR_CFG if isr_cfg.is_none() => {
                    isr_cfg = Some(struct_info);
                }
                VIRTIO_PCI_CAP_DEVICE_CFG if device_cfg.is_none() => {
                    device_cfg = Some(struct_info);
                }
                _ => {}
            }
        }

        // The common config region must be large and aligned enough to hold a `CommonCfg`.
        let common_cfg = get_bar_region::<CommonCfg, _>(
            root,
            device_function,
            &common_cfg.ok_or(VirtioPciError::MissingCommonConfig)?,
        )?;

        let notify_cfg = notify_cfg.ok_or(VirtioPciError::MissingNotifyConfig)?;
        // The VirtIO spec requires the notify offset multiplier to be even.
        if notify_off_multiplier % 2 != 0 {
            return Err(VirtioPciError::InvalidNotifyOffMultiplier(
                notify_off_multiplier,
            ));
        }
        // Queue notifications are 16-bit writes, hence the `u16` region element type.
        let notify_region = get_bar_region::<u16, _>(root, device_function, &notify_cfg)?;

        let isr_status = get_bar_region::<u8, _>(
            root,
            device_function,
            &isr_cfg.ok_or(VirtioPciError::MissingIsrConfig)?,
        )?;

        // The device-specific config space is optional; map it only if the capability was found.
        let config_space = if let Some(device_cfg) = device_cfg {
            Some(get_bar_region::<u32, _>(
                root,
                device_function,
                &device_cfg,
            )?)
        } else {
            None
        };

        Ok(Self {
            device_type,
            device_function,
            common_cfg,
            notify_region,
            notify_off_multiplier,
            isr_status,
            config_space,
        })
    }
}
155 
impl Transport for HypPciTransport {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        // The 64-bit feature set is exposed as two 32-bit windows selected via
        // `device_feature_select`: select 0 for the low half, then 1 for the high half.
        configwrite!(self.common_cfg, device_feature_select, 0u32);
        let device_features_low: u32 = configread!(self.common_cfg, device_feature);
        configwrite!(self.common_cfg, device_feature_select, 1u32);
        let device_features_high: u32 = configread!(self.common_cfg, device_feature);
        (device_features_high as u64) << 32 | device_features_low as u64
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Mirror of `read_device_features`: write the low 32 bits with select 0, then the high
        // 32 bits with select 1.
        configwrite!(self.common_cfg, driver_feature_select, 0u32);
        configwrite!(self.common_cfg, driver_feature, driver_features as u32);
        configwrite!(self.common_cfg, driver_feature_select, 1u32);
        configwrite!(
            self.common_cfg,
            driver_feature,
            (driver_features >> 32) as u32
        );
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // Queue registers are banked: select the queue first, then read its size.
        configwrite!(self.common_cfg, queue_select, queue);
        let queue_size: u16 = configread!(self.common_cfg, queue_size);
        queue_size.into()
    }

    fn notify(&mut self, queue: u16) {
        configwrite!(self.common_cfg, queue_select, queue);
        // TODO: Consider caching this somewhere (per queue).
        let queue_notify_off: u16 = configread!(self.common_cfg, queue_notify_off);

        // The queue's notify address is `queue_notify_off * notify_off_multiplier` bytes into
        // the notify region; writing the queue index there notifies the device.
        let offset_bytes = usize::from(queue_notify_off) * self.notify_off_multiplier as usize;
        self.notify_region.write(offset_bytes, queue);
    }

    fn get_status(&self) -> DeviceStatus {
        let status: u8 = configread!(self.common_cfg, device_status);
        // Ignore any status bits we don't model rather than failing.
        DeviceStatus::from_bits_truncate(status.into())
    }

    fn set_status(&mut self, status: DeviceStatus) {
        configwrite!(self.common_cfg, device_status, status.bits() as u8);
    }

    fn set_guest_page_size(&mut self, _guest_page_size: u32) {
        // No-op, the PCI transport doesn't care.
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        // Select the queue, program its size and ring addresses, then enable it last.
        configwrite!(self.common_cfg, queue_select, queue);
        configwrite!(self.common_cfg, queue_size, size as u16);
        configwrite!(self.common_cfg, queue_desc, descriptors as u64);
        configwrite!(self.common_cfg, queue_driver, driver_area as u64);
        configwrite!(self.common_cfg, queue_device, device_area as u64);
        configwrite!(self.common_cfg, queue_enable, 1u16);
    }

    fn queue_unset(&mut self, _queue: u16) {
        // The VirtIO spec doesn't allow queues to be unset once they have been set up for the PCI
        // transport, so this is a no-op.
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        configwrite!(self.common_cfg, queue_select, queue);
        let queue_enable: u16 = configread!(self.common_cfg, queue_enable);
        queue_enable == 1
    }

    fn ack_interrupt(&mut self) -> bool {
        // Reading the ISR status resets it to 0 and causes the device to de-assert the interrupt.
        let isr_status: u8 = self.isr_status.read(0);
        // TODO: Distinguish between queue interrupt and device configuration interrupt.
        // Bit 0 is the queue interrupt, bit 1 the configuration-change interrupt; report either.
        isr_status & 0x3 != 0
    }

    fn read_config_generation(&self) -> u32 {
        configread!(self.common_cfg, config_generation)
    }

    fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
        // VirtIO only guarantees 4-byte alignment of the device config space, so reject types
        // that need more, and require the offset to be naturally aligned for `T`.
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
        if config_space.size < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            Ok(config_space.read(offset))
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        // Same alignment constraints as `read_config_space`.
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
        if config_space.size < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            config_space.write(offset, value);
            Ok(())
        }
    }
}
285 
get_bar_region<T, C: ConfigurationAccess>( root: &mut PciRoot<C>, device_function: DeviceFunction, struct_info: &VirtioCapabilityInfo, ) -> Result<HypIoRegion, VirtioPciError>286 fn get_bar_region<T, C: ConfigurationAccess>(
287     root: &mut PciRoot<C>,
288     device_function: DeviceFunction,
289     struct_info: &VirtioCapabilityInfo,
290 ) -> Result<HypIoRegion, VirtioPciError> {
291     let bar_info = root.bar_info(device_function, struct_info.bar)?;
292     let (bar_address, bar_size) = bar_info
293         .memory_address_size()
294         .ok_or(VirtioPciError::UnexpectedIoBar)?;
295     if bar_address == 0 {
296         return Err(VirtioPciError::BarNotAllocated(struct_info.bar));
297     }
298     if struct_info.offset + struct_info.length > bar_size
299         || size_of::<T>() > struct_info.length as usize
300     {
301         return Err(VirtioPciError::BarOffsetOutOfRange);
302     }
303     let paddr = bar_address as PhysAddr + struct_info.offset as PhysAddr;
304     if paddr % align_of::<T>() != 0 {
305         return Err(VirtioPciError::Misaligned {
306             address: paddr,
307             alignment: align_of::<T>(),
308         });
309     }
310     Ok(HypIoRegion {
311         paddr,
312         size: struct_info.length as usize,
313     })
314 }
315