//! PCI transport for VirtIO.

pub mod bus;

use self::bus::{
    ConfigurationAccess, DeviceFunction, DeviceFunctionInfo, PciError, PciRoot, PCI_CAP_ID_VNDR,
};
use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    hal::{Hal, PhysAddr},
    nonnull_slice_from_raw_parts,
    volatile::{
        volread, volwrite, ReadOnly, Volatile, VolatileReadable, VolatileWritable, WriteOnly,
    },
    Error,
};
use core::{
    mem::{align_of, size_of},
    ptr::NonNull,
};
use thiserror::Error;
use zerocopy::{FromBytes, Immutable, IntoBytes};

/// The PCI vendor ID for VirtIO devices.
pub const VIRTIO_VENDOR_ID: u16 = 0x1af4;

/// The offset to add to a VirtIO device ID to get the corresponding PCI device ID.
const PCI_DEVICE_ID_OFFSET: u16 = 0x1040;

const TRANSITIONAL_NETWORK: u16 = 0x1000;
const TRANSITIONAL_BLOCK: u16 = 0x1001;
const TRANSITIONAL_MEMORY_BALLOONING: u16 = 0x1002;
const TRANSITIONAL_CONSOLE: u16 = 0x1003;
const TRANSITIONAL_SCSI_HOST: u16 = 0x1004;
const TRANSITIONAL_ENTROPY_SOURCE: u16 = 0x1005;
const TRANSITIONAL_9P_TRANSPORT: u16 = 0x1009;
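
// For example, a modern virtio-blk device (VirtIO device type 2) has PCI device ID
// 0x1040 + 2 = 0x1042, while a transitional virtio-blk device uses the fixed ID
// TRANSITIONAL_BLOCK (0x1001) above.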

/// The offset of the `bar` field within `virtio_pci_cap`.
pub(crate) const CAP_BAR_OFFSET: u8 = 4;
/// The offset of the `offset` field within `virtio_pci_cap`.
pub(crate) const CAP_BAR_OFFSET_OFFSET: u8 = 8;
/// The offset of the `length` field within `virtio_pci_cap`.
pub(crate) const CAP_LENGTH_OFFSET: u8 = 12;
/// The offset of the `notify_off_multiplier` field within `virtio_pci_notify_cap`.
pub(crate) const CAP_NOTIFY_OFF_MULTIPLIER_OFFSET: u8 = 16;
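
// For reference, these offsets follow the `virtio_pci_cap` layout from spec section 4.1.4
// (a sketch; field widths as given in the spec):
//
//     struct virtio_pci_cap {
//         u8 cap_vndr;    /* offset 0: PCI_CAP_ID_VNDR */
//         u8 cap_next;    /* offset 1: next capability pointer */
//         u8 cap_len;     /* offset 2: capability length */
//         u8 cfg_type;    /* offset 3: one of the VIRTIO_PCI_CAP_*_CFG values below */
//         u8 bar;         /* offset 4: BAR holding the structure */
//         u8 padding[3];  /* offsets 5..7 (partly used as `id` in spec 1.2) */
//         le32 offset;    /* offset 8: offset of the structure within the BAR */
//         le32 length;    /* offset 12: length of the structure, in bytes */
//     };
//
// `virtio_pci_notify_cap` appends `le32 notify_off_multiplier` at offset 16.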

/// Common configuration.
pub const VIRTIO_PCI_CAP_COMMON_CFG: u8 = 1;
/// Notifications.
pub const VIRTIO_PCI_CAP_NOTIFY_CFG: u8 = 2;
/// ISR Status.
pub const VIRTIO_PCI_CAP_ISR_CFG: u8 = 3;
/// Device specific configuration.
pub const VIRTIO_PCI_CAP_DEVICE_CFG: u8 = 4;

pub(crate) fn device_type(pci_device_id: u16) -> DeviceType {
    match pci_device_id {
        TRANSITIONAL_NETWORK => DeviceType::Network,
        TRANSITIONAL_BLOCK => DeviceType::Block,
        TRANSITIONAL_MEMORY_BALLOONING => DeviceType::MemoryBalloon,
        TRANSITIONAL_CONSOLE => DeviceType::Console,
        TRANSITIONAL_SCSI_HOST => DeviceType::ScsiHost,
        TRANSITIONAL_ENTROPY_SOURCE => DeviceType::EntropySource,
        TRANSITIONAL_9P_TRANSPORT => DeviceType::_9P,
        id if id >= PCI_DEVICE_ID_OFFSET => DeviceType::from(id - PCI_DEVICE_ID_OFFSET),
        _ => DeviceType::Invalid,
    }
}

/// Returns the type of VirtIO device to which the given PCI vendor and device ID corresponds, or
/// `None` if it is not a recognised VirtIO device.
pub fn virtio_device_type(device_function_info: &DeviceFunctionInfo) -> Option<DeviceType> {
    if device_function_info.vendor_id == VIRTIO_VENDOR_ID {
        let device_type = device_type(device_function_info.device_id);
        if device_type != DeviceType::Invalid {
            return Some(device_type);
        }
    }
    None
}

/// PCI transport for VirtIO.
///
/// Ref: 4.1 Virtio Over PCI Bus
#[derive(Debug)]
pub struct PciTransport {
    device_type: DeviceType,
    /// The bus, device and function identifier for the VirtIO device.
    device_function: DeviceFunction,
    /// The common configuration structure within some BAR.
    common_cfg: NonNull<CommonCfg>,
    /// The start of the queue notification region within some BAR.
    notify_region: NonNull<[WriteOnly<u16>]>,
    notify_off_multiplier: u32,
    /// The ISR status register within some BAR.
    isr_status: NonNull<Volatile<u8>>,
    /// The VirtIO device-specific configuration within some BAR.
    config_space: Option<NonNull<[u32]>>,
}

impl PciTransport {
    /// Construct a new PCI VirtIO device driver for the given device function on the given PCI
    /// root controller.
    ///
    /// The PCI device must already have had its BARs allocated.
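    ///
    /// # Example
    ///
    /// A minimal usage sketch (marked `ignore` as it is not compiled here): `HalImpl` stands in
    /// for a platform [`Hal`] implementation and `access` for whatever [`ConfigurationAccess`]
    /// implementation the [`bus`] module provides for the platform.
    ///
    /// ```ignore
    /// let mut root = PciRoot::new(access);
    /// for (device_function, info) in root.enumerate_bus(0) {
    ///     if let Some(virtio_type) = virtio_device_type(&info) {
    ///         let transport = PciTransport::new::<HalImpl, _>(&mut root, device_function)?;
    ///         // Hand `transport` to the device driver matching `virtio_type`.
    ///     }
    /// }
    /// ```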
    pub fn new<H: Hal, C: ConfigurationAccess>(
        root: &mut PciRoot<C>,
        device_function: DeviceFunction,
    ) -> Result<Self, VirtioPciError> {
        let device_vendor = root.configuration_access.read_word(device_function, 0);
        let device_id = (device_vendor >> 16) as u16;
        let vendor_id = device_vendor as u16;
        if vendor_id != VIRTIO_VENDOR_ID {
            return Err(VirtioPciError::InvalidVendorId(vendor_id));
        }
        let device_type = device_type(device_id);

        // Find the PCI capabilities we need.
        let mut common_cfg = None;
        let mut notify_cfg = None;
        let mut notify_off_multiplier = 0;
        let mut isr_cfg = None;
        let mut device_cfg = None;
        for capability in root.capabilities(device_function) {
            if capability.id != PCI_CAP_ID_VNDR {
                continue;
            }
            let cap_len = capability.private_header as u8;
            let cfg_type = (capability.private_header >> 8) as u8;
            if cap_len < 16 {
                continue;
            }
            let struct_info = VirtioCapabilityInfo {
                bar: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET)
                    as u8,
                offset: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_BAR_OFFSET_OFFSET),
                length: root
                    .configuration_access
                    .read_word(device_function, capability.offset + CAP_LENGTH_OFFSET),
            };

            match cfg_type {
                VIRTIO_PCI_CAP_COMMON_CFG if common_cfg.is_none() => {
                    common_cfg = Some(struct_info);
                }
                VIRTIO_PCI_CAP_NOTIFY_CFG if cap_len >= 20 && notify_cfg.is_none() => {
                    notify_cfg = Some(struct_info);
                    notify_off_multiplier = root.configuration_access.read_word(
                        device_function,
                        capability.offset + CAP_NOTIFY_OFF_MULTIPLIER_OFFSET,
                    );
                }
                VIRTIO_PCI_CAP_ISR_CFG if isr_cfg.is_none() => {
                    isr_cfg = Some(struct_info);
                }
                VIRTIO_PCI_CAP_DEVICE_CFG if device_cfg.is_none() => {
                    device_cfg = Some(struct_info);
                }
                _ => {}
            }
        }

        let common_cfg = get_bar_region::<H, _, _>(
            root,
            device_function,
            &common_cfg.ok_or(VirtioPciError::MissingCommonConfig)?,
        )?;

        let notify_cfg = notify_cfg.ok_or(VirtioPciError::MissingNotifyConfig)?;
        if notify_off_multiplier % 2 != 0 {
            return Err(VirtioPciError::InvalidNotifyOffMultiplier(
                notify_off_multiplier,
            ));
        }
        let notify_region = get_bar_region_slice::<H, _, _>(root, device_function, &notify_cfg)?;

        let isr_status = get_bar_region::<H, _, _>(
            root,
            device_function,
            &isr_cfg.ok_or(VirtioPciError::MissingIsrConfig)?,
        )?;

        let config_space = if let Some(device_cfg) = device_cfg {
            Some(get_bar_region_slice::<H, _, _>(
                root,
                device_function,
                &device_cfg,
            )?)
        } else {
            None
        };

        Ok(Self {
            device_type,
            device_function,
            common_cfg,
            notify_region,
            notify_off_multiplier,
            isr_status,
            config_space,
        })
    }
}

impl Transport for PciTransport {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, device_feature_select, 0);
            let mut device_features_bits = volread!(self.common_cfg, device_feature) as u64;
            volwrite!(self.common_cfg, device_feature_select, 1);
            device_features_bits |= (volread!(self.common_cfg, device_feature) as u64) << 32;
            device_features_bits
        }
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, driver_feature_select, 0);
            volwrite!(self.common_cfg, driver_feature, driver_features as u32);
            volwrite!(self.common_cfg, driver_feature_select, 1);
            volwrite!(
                self.common_cfg,
                driver_feature,
                (driver_features >> 32) as u32
            );
        }
    }

    fn max_queue_size(&mut self, queue: u16) -> u32 {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, queue_select, queue);
            volread!(self.common_cfg, queue_size).into()
        }
    }

    fn notify(&mut self, queue: u16) {
        // Safe because the common config and notify region pointers are valid and we checked in
        // get_bar_region that they were aligned.
        unsafe {
            volwrite!(self.common_cfg, queue_select, queue);
            // TODO: Consider caching this somewhere (per queue).
            let queue_notify_off = volread!(self.common_cfg, queue_notify_off);

            let offset_bytes = usize::from(queue_notify_off) * self.notify_off_multiplier as usize;
            let index = offset_bytes / size_of::<u16>();
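            // Worked example: with notify_off_multiplier == 4 and queue_notify_off == 3 for the
            // selected queue, the notification address is 3 * 4 = 12 bytes into the notify
            // region, i.e. index 6 of this u16 slice.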
            (&raw mut (*self.notify_region.as_ptr())[index]).vwrite(queue);
        }
    }

    fn get_status(&self) -> DeviceStatus {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        let status = unsafe { volread!(self.common_cfg, device_status) };
        DeviceStatus::from_bits_truncate(status.into())
    }

    fn set_status(&mut self, status: DeviceStatus) {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, device_status, status.bits() as u8);
        }
    }

    fn set_guest_page_size(&mut self, _guest_page_size: u32) {
        // No-op, the PCI transport doesn't care.
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, queue_select, queue);
            volwrite!(self.common_cfg, queue_size, size as u16);
            volwrite!(self.common_cfg, queue_desc, descriptors as u64);
            volwrite!(self.common_cfg, queue_driver, driver_area as u64);
            volwrite!(self.common_cfg, queue_device, device_area as u64);
            volwrite!(self.common_cfg, queue_enable, 1);
        }
    }

    fn queue_unset(&mut self, _queue: u16) {
        // The VirtIO spec doesn't allow queues to be unset once they have been set up for the PCI
        // transport, so this is a no-op.
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        // Safe because the common config pointer is valid and we checked in get_bar_region that it
        // was aligned.
        unsafe {
            volwrite!(self.common_cfg, queue_select, queue);
            volread!(self.common_cfg, queue_enable) == 1
        }
    }

    fn ack_interrupt(&mut self) -> bool {
        // Safe because the ISR status pointer is valid and we checked in get_bar_region that it
        // was aligned.
        // Reading the ISR status resets it to 0 and causes the device to de-assert the interrupt.
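        // In the ISR status byte, bit 0 indicates a queue interrupt and bit 1 a device
        // configuration change interrupt, hence the 0x3 mask below.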
        let isr_status = unsafe { self.isr_status.as_ptr().vread() };
        // TODO: Distinguish between queue interrupt and device configuration interrupt.
        isr_status & 0x3 != 0
    }

    fn read_config_generation(&self) -> u32 {
        // SAFETY: `self.common_cfg` points to a valid, aligned common configuration MMIO region.
        unsafe { volread!(self.common_cfg, config_generation) }.into()
    }

    fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
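        // Bounds check in bytes: the config space is exposed as a `[u32]`, so e.g. a 16-byte
        // device config region has `config_space.len() == 4`, and reading a `u32` at offset 12
        // is allowed (12 + 4 <= 16) while offset 16 would be rejected below.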
        if config_space.len() * size_of::<u32>() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: If we have a config space pointer it must be valid for its length, and we just
            // checked that the offset and size of the access was within the length.
            unsafe {
                Ok((config_space.as_ptr().cast::<T>())
                    .byte_add(offset)
                    .read_volatile())
            }
        }
    }

    fn write_config_space<T: IntoBytes + Immutable>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        assert!(align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>());
        assert_eq!(offset % align_of::<T>(), 0);

        let config_space = self.config_space.ok_or(Error::ConfigSpaceMissing)?;
        if config_space.len() * size_of::<u32>() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            // SAFETY: If we have a config space pointer it must be valid for its length, and we just
            // checked that the offset and size of the access was within the length.
            unsafe {
                (config_space.as_ptr().cast::<T>())
                    .byte_add(offset)
                    .write_volatile(value);
            }
            Ok(())
        }
    }
}

// SAFETY: MMIO can be done from any thread or CPU core.
unsafe impl Send for PciTransport {}

// SAFETY: `&PciTransport` only allows MMIO reads or getting the config space, both of which are
// fine to happen concurrently on different CPU cores.
unsafe impl Sync for PciTransport {}

impl Drop for PciTransport {
    fn drop(&mut self) {
        // Reset the device when the transport is dropped.
        self.set_status(DeviceStatus::empty());
        while self.get_status() != DeviceStatus::empty() {}
    }
}

/// `virtio_pci_common_cfg`, see 4.1.4.3 "Common configuration structure layout".
#[repr(C)]
pub(crate) struct CommonCfg {
    pub device_feature_select: Volatile<u32>,
    pub device_feature: ReadOnly<u32>,
    pub driver_feature_select: Volatile<u32>,
    pub driver_feature: Volatile<u32>,
    pub msix_config: Volatile<u16>,
    pub num_queues: ReadOnly<u16>,
    pub device_status: Volatile<u8>,
    pub config_generation: ReadOnly<u8>,
    pub queue_select: Volatile<u16>,
    pub queue_size: Volatile<u16>,
    pub queue_msix_vector: Volatile<u16>,
    pub queue_enable: Volatile<u16>,
    pub queue_notify_off: Volatile<u16>,
    pub queue_desc: Volatile<u64>,
    pub queue_driver: Volatile<u64>,
    pub queue_device: Volatile<u64>,
}

/// Information about a VirtIO structure within some BAR, as provided by a `virtio_pci_cap`.
#[derive(Clone, Debug, Eq, PartialEq)]
pub(crate) struct VirtioCapabilityInfo {
    /// The bar in which the structure can be found.
    pub bar: u8,
    /// The offset within the bar.
    pub offset: u32,
    /// The length in bytes of the structure within the bar.
    pub length: u32,
}

fn get_bar_region<H: Hal, T, C: ConfigurationAccess>(
    root: &mut PciRoot<C>,
    device_function: DeviceFunction,
    struct_info: &VirtioCapabilityInfo,
) -> Result<NonNull<T>, VirtioPciError> {
    let bar_info = root.bar_info(device_function, struct_info.bar)?;
    let (bar_address, bar_size) = bar_info
        .memory_address_size()
        .ok_or(VirtioPciError::UnexpectedIoBar)?;
    if bar_address == 0 {
        return Err(VirtioPciError::BarNotAllocated(struct_info.bar));
    }
    if struct_info.offset + struct_info.length > bar_size
        || size_of::<T>() > struct_info.length as usize
    {
        return Err(VirtioPciError::BarOffsetOutOfRange);
    }
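    // Worked example: for a capability with bar = 4, offset = 0x3000 and length = 0x1000, and a
    // memory BAR 4 at physical address 0xfe00_0000 with size 0x4000, the region used is
    // physical 0xfe00_3000..0xfe00_4000.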
    let paddr = bar_address as PhysAddr + struct_info.offset as PhysAddr;
    // Safe because the paddr and size describe a valid MMIO region, at least according to the PCI
    // bus.
    let vaddr = unsafe { H::mmio_phys_to_virt(paddr, struct_info.length as usize) };
    if vaddr.as_ptr() as usize % align_of::<T>() != 0 {
        return Err(VirtioPciError::Misaligned {
            address: vaddr.as_ptr() as usize,
            alignment: align_of::<T>(),
        });
    }
    Ok(vaddr.cast())
}

fn get_bar_region_slice<H: Hal, T, C: ConfigurationAccess>(
    root: &mut PciRoot<C>,
    device_function: DeviceFunction,
    struct_info: &VirtioCapabilityInfo,
) -> Result<NonNull<[T]>, VirtioPciError> {
    let ptr = get_bar_region::<H, T, C>(root, device_function, struct_info)?;
    Ok(nonnull_slice_from_raw_parts(
        ptr,
        struct_info.length as usize / size_of::<T>(),
    ))
}

/// An error encountered initialising a VirtIO PCI transport.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum VirtioPciError {
    /// PCI device vendor ID was not the VirtIO vendor ID.
    #[error("PCI device vendor ID {0:#06x} was not the VirtIO vendor ID {VIRTIO_VENDOR_ID:#06x}.")]
    InvalidVendorId(u16),
    /// No valid `VIRTIO_PCI_CAP_COMMON_CFG` capability was found.
    #[error("No valid `VIRTIO_PCI_CAP_COMMON_CFG` capability was found.")]
    MissingCommonConfig,
    /// No valid `VIRTIO_PCI_CAP_NOTIFY_CFG` capability was found.
    #[error("No valid `VIRTIO_PCI_CAP_NOTIFY_CFG` capability was found.")]
    MissingNotifyConfig,
    /// `VIRTIO_PCI_CAP_NOTIFY_CFG` capability has a `notify_off_multiplier` that is not a multiple
    /// of 2.
    #[error("`VIRTIO_PCI_CAP_NOTIFY_CFG` capability has a `notify_off_multiplier` that is not a multiple of 2: {0}")]
    InvalidNotifyOffMultiplier(u32),
    /// No valid `VIRTIO_PCI_CAP_ISR_CFG` capability was found.
    #[error("No valid `VIRTIO_PCI_CAP_ISR_CFG` capability was found.")]
    MissingIsrConfig,
    /// An IO BAR was provided rather than a memory BAR.
    #[error("Unexpected IO BAR (expected memory BAR).")]
    UnexpectedIoBar,
    /// A BAR which we need was not allocated an address.
    #[error("Bar {0} not allocated.")]
    BarNotAllocated(u8),
    /// The offset for some capability was greater than the length of the BAR.
    #[error("Capability offset greater than BAR length.")]
    BarOffsetOutOfRange,
    /// The address was not aligned as expected.
    #[error("Address {address:#018x} was not aligned to a {alignment} byte boundary as expected.")]
    Misaligned {
        /// The address in question.
        address: usize,
        /// The expected alignment in bytes.
        alignment: usize,
    },
    /// A generic PCI error.
    #[error(transparent)]
    Pci(PciError),
}

impl From<PciError> for VirtioPciError {
    fn from(error: PciError) -> Self {
        Self::Pci(error)
    }
}

// SAFETY: The `address` field of `VirtioPciError::Misaligned` is only used for debug output.
unsafe impl Send for VirtioPciError {}

// SAFETY: The `address` field of `VirtioPciError::Misaligned` is only used for debug output.
unsafe impl Sync for VirtioPciError {}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn transitional_device_ids() {
        assert_eq!(device_type(0x1000), DeviceType::Network);
        assert_eq!(device_type(0x1002), DeviceType::MemoryBalloon);
        assert_eq!(device_type(0x1009), DeviceType::_9P);
    }

    #[test]
    fn offset_device_ids() {
        assert_eq!(device_type(0x1040), DeviceType::Invalid);
        assert_eq!(device_type(0x1045), DeviceType::MemoryBalloon);
        assert_eq!(device_type(0x1049), DeviceType::_9P);
        assert_eq!(device_type(0x1058), DeviceType::Memory);
        assert_eq!(device_type(0x1059), DeviceType::Sound);
        assert_eq!(device_type(0x1060), DeviceType::Invalid);
    }

    #[test]
    fn virtio_device_type_valid() {
        assert_eq!(
            virtio_device_type(&DeviceFunctionInfo {
                vendor_id: VIRTIO_VENDOR_ID,
                device_id: TRANSITIONAL_BLOCK,
                class: 0,
                subclass: 0,
                prog_if: 0,
                revision: 0,
                header_type: bus::HeaderType::Standard,
            }),
            Some(DeviceType::Block)
        );
    }

    #[test]
    fn virtio_device_type_invalid() {
        // Non-VirtIO vendor ID.
        assert_eq!(
            virtio_device_type(&DeviceFunctionInfo {
                vendor_id: 0x1234,
                device_id: TRANSITIONAL_BLOCK,
                class: 0,
                subclass: 0,
                prog_if: 0,
                revision: 0,
                header_type: bus::HeaderType::Standard,
            }),
            None
        );

        // Invalid device ID.
        assert_eq!(
            virtio_device_type(&DeviceFunctionInfo {
                vendor_id: VIRTIO_VENDOR_ID,
                device_id: 0x1040,
                class: 0,
                subclass: 0,
                prog_if: 0,
                revision: 0,
                header_type: bus::HeaderType::Standard,
            }),
            None
        );
    }
}