• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 //! MMIO transport for VirtIO.
2 
use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    align_up,
    queue::Descriptor,
    volatile::{volread, volwrite, ReadOnly, Volatile, WriteOnly},
    Error, PhysAddr, PAGE_SIZE,
};
use core::{
    convert::{TryFrom, TryInto},
    mem::{align_of, size_of},
    ptr::NonNull,
};
// Needed for `#[derive(Error)]` on `MmioError`; the derive macro lives in a
// different namespace from `crate::Error`, so both imports can coexist.
use thiserror::Error;
use zerocopy::{FromBytes, Immutable, IntoBytes};
16 
/// Expected value of the `magic` register: the ASCII string "virt", read as a little-endian u32.
const MAGIC_VALUE: u32 = 0x7472_6976;
/// Transport version reported by legacy (pre-1.0) MMIO devices.
pub(crate) const LEGACY_VERSION: u32 = 1;
/// Transport version reported by modern MMIO devices.
pub(crate) const MODERN_VERSION: u32 = 2;
/// Offset of the device-specific configuration space from the start of the MMIO region.
const CONFIG_SPACE_OFFSET: usize = 0x100;
21 
/// The version of the VirtIO MMIO transport supported by a device.
///
/// `#[repr(u32)]` with explicit discriminants so each variant corresponds directly to the raw
/// value read from the header's `version` register.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[repr(u32)]
pub enum MmioVersion {
    /// Legacy MMIO transport with page-based addressing.
    Legacy = LEGACY_VERSION,
    /// Modern MMIO transport.
    Modern = MODERN_VERSION,
}
31 
32 impl TryFrom<u32> for MmioVersion {
33     type Error = MmioError;
34 
try_from(version: u32) -> Result<Self, Self::Error>35     fn try_from(version: u32) -> Result<Self, Self::Error> {
36         match version {
37             LEGACY_VERSION => Ok(Self::Legacy),
38             MODERN_VERSION => Ok(Self::Modern),
39             _ => Err(MmioError::UnsupportedVersion(version)),
40         }
41     }
42 }
43 
44 impl From<MmioVersion> for u32 {
from(version: MmioVersion) -> Self45     fn from(version: MmioVersion) -> Self {
46         match version {
47             MmioVersion::Legacy => LEGACY_VERSION,
48             MmioVersion::Modern => MODERN_VERSION,
49         }
50     }
51 }
52 
/// An error encountered initialising a VirtIO MMIO transport.
#[derive(Clone, Debug, Eq, Error, PartialEq)]
pub enum MmioError {
    /// The header doesn't start with the expected magic value 0x74726976 (ASCII "virt").
    #[error("Invalid magic value {0:#010x} (expected 0x74726976)")]
    BadMagic(u32),
    /// The header reports a version number that is neither 1 (legacy) nor 2 (modern).
    #[error("Unsupported Virtio MMIO version {0}")]
    UnsupportedVersion(u32),
    /// The header reports a device ID of 0, which marks a placeholder with no device present.
    #[error("Device ID was zero")]
    ZeroDeviceId,
    /// The MMIO region size was smaller than the header size we expect.
    #[error("MMIO region too small")]
    MmioRegionTooSmall,
}
69 
/// MMIO Device Register Interface, both legacy and modern.
///
/// Field order and sizes define the hardware register layout and must not be changed.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
#[repr(C)]
pub struct VirtIOHeader {
    /// Magic value
    magic: ReadOnly<u32>,

    /// Device version number
    ///
    /// Legacy device returns value 0x1.
    version: ReadOnly<u32>,

    /// Virtio Subsystem Device ID
    device_id: ReadOnly<u32>,

    /// Virtio Subsystem Vendor ID
    vendor_id: ReadOnly<u32>,

    /// Flags representing features the device supports
    device_features: ReadOnly<u32>,

    /// Device (host) features word selection
    device_features_sel: WriteOnly<u32>,

    /// Reserved
    __r1: [ReadOnly<u32>; 2],

    /// Flags representing device features understood and activated by the driver
    driver_features: WriteOnly<u32>,

    /// Activated (guest) features word selection
    driver_features_sel: WriteOnly<u32>,

    /// Guest page size
    ///
    /// The driver writes the guest page size in bytes to the register during
    /// initialization, before any queues are used. This value should be a
    /// power of 2 and is used by the device to calculate the Guest address
    /// of the first queue page (see QueuePFN).
    legacy_guest_page_size: WriteOnly<u32>,

    /// Reserved
    __r2: ReadOnly<u32>,

    /// Virtual queue index
    ///
    /// Writing to this register selects the virtual queue that the following
    /// operations on the QueueNumMax, QueueNum, QueueAlign and QueuePFN
    /// registers apply to. The index number of the first queue is zero (0x0).
    queue_sel: WriteOnly<u32>,

    /// Maximum virtual queue size
    ///
    /// Reading from the register returns the maximum size of the queue the
    /// device is ready to process or zero (0x0) if the queue is not available.
    /// This applies to the queue selected by writing to QueueSel and is
    /// allowed only when QueuePFN is set to zero (0x0), so when the queue is
    /// not actively used.
    queue_num_max: ReadOnly<u32>,

    /// Virtual queue size
    ///
    /// Queue size is the number of elements in the queue. Writing to this
    /// register notifies the device what size of the queue the driver will use.
    /// This applies to the queue selected by writing to QueueSel.
    queue_num: WriteOnly<u32>,

    /// Used Ring alignment in the virtual queue
    ///
    /// Writing to this register notifies the device about alignment boundary
    /// of the Used Ring in bytes. This value should be a power of 2 and
    /// applies to the queue selected by writing to QueueSel.
    legacy_queue_align: WriteOnly<u32>,

    /// Guest physical page number of the virtual queue
    ///
    /// Writing to this register notifies the device about location of the
    /// virtual queue in the Guest’s physical address space. This value is
    /// the index number of a page starting with the queue Descriptor Table.
    /// Value zero (0x0) means physical address zero (0x00000000) and is illegal.
    /// When the driver stops using the queue it writes zero (0x0) to this
    /// register. Reading from this register returns the currently used page
    /// number of the queue, therefore a value other than zero (0x0) means that
    /// the queue is in use. Both read and write accesses apply to the queue
    /// selected by writing to QueueSel.
    legacy_queue_pfn: Volatile<u32>,

    /// Queue ready bit; modern (new) interface only.
    queue_ready: Volatile<u32>,

    /// Reserved
    __r3: [ReadOnly<u32>; 2],

    /// Queue notifier; writing a queue index here notifies the device of new buffers.
    queue_notify: WriteOnly<u32>,

    /// Reserved
    __r4: [ReadOnly<u32>; 3],

    /// Interrupt status; non-zero bits indicate pending interrupt causes.
    interrupt_status: ReadOnly<u32>,

    /// Interrupt acknowledge; write back the bits read from `interrupt_status` to ack them.
    interrupt_ack: WriteOnly<u32>,

    /// Reserved
    __r5: [ReadOnly<u32>; 2],

    /// Device status
    ///
    /// Reading from this register returns the current device status flags.
    /// Writing non-zero values to this register sets the status flags,
    /// indicating the OS/driver progress. Writing zero (0x0) to this register
    /// triggers a device reset. The device sets QueuePFN to zero (0x0) for
    /// all queues in the device. Also see 3.1 Device Initialization.
    status: Volatile<DeviceStatus>,

    /// Reserved
    __r6: [ReadOnly<u32>; 3],

    // Modern (new) interface only from here on.
    /// Low 32 bits of the physical address of the queue's descriptor table.
    queue_desc_low: WriteOnly<u32>,
    /// High 32 bits of the physical address of the queue's descriptor table.
    queue_desc_high: WriteOnly<u32>,

    /// Reserved
    __r7: [ReadOnly<u32>; 2],

    /// Low 32 bits of the physical address of the queue's driver area (available ring).
    queue_driver_low: WriteOnly<u32>,
    /// High 32 bits of the physical address of the queue's driver area (available ring).
    queue_driver_high: WriteOnly<u32>,

    /// Reserved
    __r8: [ReadOnly<u32>; 2],

    /// Low 32 bits of the physical address of the queue's device area (used ring).
    queue_device_low: WriteOnly<u32>,
    /// High 32 bits of the physical address of the queue's device area (used ring).
    queue_device_high: WriteOnly<u32>,

    /// Reserved
    __r9: [ReadOnly<u32>; 21],

    /// Configuration atomicity value; changes whenever the config space is updated by the device.
    config_generation: ReadOnly<u32>,
}
212 
impl VirtIOHeader {
    /// Constructs a fake VirtIO header for use in unit tests.
    ///
    /// The magic register is set to the expected `MAGIC_VALUE`; `version`, `device_id`,
    /// `vendor_id`, `device_features` and `queue_num_max` take the given values, and every
    /// other register starts out zeroed.
    #[cfg(test)]
    pub fn make_fake_header(
        version: u32,
        device_id: u32,
        vendor_id: u32,
        device_features: u32,
        queue_num_max: u32,
    ) -> Self {
        Self {
            magic: ReadOnly::new(MAGIC_VALUE),
            version: ReadOnly::new(version),
            device_id: ReadOnly::new(device_id),
            vendor_id: ReadOnly::new(vendor_id),
            device_features: ReadOnly::new(device_features),
            device_features_sel: WriteOnly::default(),
            __r1: Default::default(),
            driver_features: Default::default(),
            driver_features_sel: Default::default(),
            legacy_guest_page_size: Default::default(),
            __r2: Default::default(),
            queue_sel: Default::default(),
            queue_num_max: ReadOnly::new(queue_num_max),
            queue_num: Default::default(),
            legacy_queue_align: Default::default(),
            legacy_queue_pfn: Default::default(),
            queue_ready: Default::default(),
            __r3: Default::default(),
            queue_notify: Default::default(),
            __r4: Default::default(),
            interrupt_status: Default::default(),
            interrupt_ack: Default::default(),
            __r5: Default::default(),
            status: Volatile::new(DeviceStatus::empty()),
            __r6: Default::default(),
            queue_desc_low: Default::default(),
            queue_desc_high: Default::default(),
            __r7: Default::default(),
            queue_driver_low: Default::default(),
            queue_driver_high: Default::default(),
            __r8: Default::default(),
            queue_device_low: Default::default(),
            queue_device_high: Default::default(),
            __r9: Default::default(),
            config_generation: Default::default(),
        }
    }
}
262 
/// MMIO Device Register Interface.
///
/// Ref: 4.2.2 MMIO Device Register Layout and 4.2.4 Legacy interface
#[derive(Debug)]
pub struct MmioTransport {
    /// Pointer to the device's MMIO register block; validity is guaranteed by the caller of
    /// [`MmioTransport::new`].
    header: NonNull<VirtIOHeader>,
    /// Transport version (legacy or modern) read from the header during construction.
    version: MmioVersion,
    /// The size in bytes of the config space.
    config_space_size: usize,
}
273 
274 impl MmioTransport {
275     /// Constructs a new VirtIO MMIO transport, or returns an error if the header reports an
276     /// unsupported version.
277     ///
278     /// # Safety
279     /// `header` must point to a properly aligned valid VirtIO MMIO region, which must remain valid
280     /// for the lifetime of the transport that is returned.
new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError>281     pub unsafe fn new(header: NonNull<VirtIOHeader>, mmio_size: usize) -> Result<Self, MmioError> {
282         let magic = volread!(header, magic);
283         if magic != MAGIC_VALUE {
284             return Err(MmioError::BadMagic(magic));
285         }
286         if volread!(header, device_id) == 0 {
287             return Err(MmioError::ZeroDeviceId);
288         }
289         let Some(config_space_size) = mmio_size.checked_sub(CONFIG_SPACE_OFFSET) else {
290             return Err(MmioError::MmioRegionTooSmall);
291         };
292         let version = volread!(header, version).try_into()?;
293         Ok(Self {
294             header,
295             version,
296             config_space_size,
297         })
298     }
299 
300     /// Gets the version of the VirtIO MMIO transport.
version(&self) -> MmioVersion301     pub fn version(&self) -> MmioVersion {
302         self.version
303     }
304 
305     /// Gets the vendor ID.
vendor_id(&self) -> u32306     pub fn vendor_id(&self) -> u32 {
307         // Safe because self.header points to a valid VirtIO MMIO region.
308         unsafe { volread!(self.header, vendor_id) }
309     }
310 }
311 
// SAFETY: `header` is only used for MMIO, which can happen from any thread or CPU core.
unsafe impl Send for MmioTransport {}

// SAFETY: `&MmioTransport` only allows MMIO reads or getting the config space, both of which are
// fine to happen concurrently on different CPU cores.
unsafe impl Sync for MmioTransport {}
318 
319 impl Transport for MmioTransport {
device_type(&self) -> DeviceType320     fn device_type(&self) -> DeviceType {
321         // Safe because self.header points to a valid VirtIO MMIO region.
322         let device_id = unsafe { volread!(self.header, device_id) };
323         device_id.into()
324     }
325 
read_device_features(&mut self) -> u64326     fn read_device_features(&mut self) -> u64 {
327         // Safe because self.header points to a valid VirtIO MMIO region.
328         unsafe {
329             volwrite!(self.header, device_features_sel, 0); // device features [0, 32)
330             let mut device_features_bits = volread!(self.header, device_features).into();
331             volwrite!(self.header, device_features_sel, 1); // device features [32, 64)
332             device_features_bits += (volread!(self.header, device_features) as u64) << 32;
333             device_features_bits
334         }
335     }
336 
write_driver_features(&mut self, driver_features: u64)337     fn write_driver_features(&mut self, driver_features: u64) {
338         // Safe because self.header points to a valid VirtIO MMIO region.
339         unsafe {
340             volwrite!(self.header, driver_features_sel, 0); // driver features [0, 32)
341             volwrite!(self.header, driver_features, driver_features as u32);
342             volwrite!(self.header, driver_features_sel, 1); // driver features [32, 64)
343             volwrite!(self.header, driver_features, (driver_features >> 32) as u32);
344         }
345     }
346 
max_queue_size(&mut self, queue: u16) -> u32347     fn max_queue_size(&mut self, queue: u16) -> u32 {
348         // Safe because self.header points to a valid VirtIO MMIO region.
349         unsafe {
350             volwrite!(self.header, queue_sel, queue.into());
351             volread!(self.header, queue_num_max)
352         }
353     }
354 
notify(&mut self, queue: u16)355     fn notify(&mut self, queue: u16) {
356         // Safe because self.header points to a valid VirtIO MMIO region.
357         unsafe {
358             volwrite!(self.header, queue_notify, queue.into());
359         }
360     }
361 
get_status(&self) -> DeviceStatus362     fn get_status(&self) -> DeviceStatus {
363         // Safe because self.header points to a valid VirtIO MMIO region.
364         unsafe { volread!(self.header, status) }
365     }
366 
set_status(&mut self, status: DeviceStatus)367     fn set_status(&mut self, status: DeviceStatus) {
368         // Safe because self.header points to a valid VirtIO MMIO region.
369         unsafe {
370             volwrite!(self.header, status, status);
371         }
372     }
373 
set_guest_page_size(&mut self, guest_page_size: u32)374     fn set_guest_page_size(&mut self, guest_page_size: u32) {
375         match self.version {
376             MmioVersion::Legacy => {
377                 // Safe because self.header points to a valid VirtIO MMIO region.
378                 unsafe {
379                     volwrite!(self.header, legacy_guest_page_size, guest_page_size);
380                 }
381             }
382             MmioVersion::Modern => {
383                 // No-op, modern devices don't care.
384             }
385         }
386     }
387 
requires_legacy_layout(&self) -> bool388     fn requires_legacy_layout(&self) -> bool {
389         match self.version {
390             MmioVersion::Legacy => true,
391             MmioVersion::Modern => false,
392         }
393     }
394 
queue_set( &mut self, queue: u16, size: u32, descriptors: PhysAddr, driver_area: PhysAddr, device_area: PhysAddr, )395     fn queue_set(
396         &mut self,
397         queue: u16,
398         size: u32,
399         descriptors: PhysAddr,
400         driver_area: PhysAddr,
401         device_area: PhysAddr,
402     ) {
403         match self.version {
404             MmioVersion::Legacy => {
405                 assert_eq!(
406                     driver_area - descriptors,
407                     size_of::<Descriptor>() * size as usize
408                 );
409                 assert_eq!(
410                     device_area - descriptors,
411                     align_up(
412                         size_of::<Descriptor>() * size as usize
413                             + size_of::<u16>() * (size as usize + 3)
414                     )
415                 );
416                 let align = PAGE_SIZE as u32;
417                 let pfn = (descriptors / PAGE_SIZE) as u32;
418                 assert_eq!(pfn as usize * PAGE_SIZE, descriptors);
419                 // Safe because self.header points to a valid VirtIO MMIO region.
420                 unsafe {
421                     volwrite!(self.header, queue_sel, queue.into());
422                     volwrite!(self.header, queue_num, size);
423                     volwrite!(self.header, legacy_queue_align, align);
424                     volwrite!(self.header, legacy_queue_pfn, pfn);
425                 }
426             }
427             MmioVersion::Modern => {
428                 // Safe because self.header points to a valid VirtIO MMIO region.
429                 unsafe {
430                     volwrite!(self.header, queue_sel, queue.into());
431                     volwrite!(self.header, queue_num, size);
432                     volwrite!(self.header, queue_desc_low, descriptors as u32);
433                     volwrite!(self.header, queue_desc_high, (descriptors >> 32) as u32);
434                     volwrite!(self.header, queue_driver_low, driver_area as u32);
435                     volwrite!(self.header, queue_driver_high, (driver_area >> 32) as u32);
436                     volwrite!(self.header, queue_device_low, device_area as u32);
437                     volwrite!(self.header, queue_device_high, (device_area >> 32) as u32);
438                     volwrite!(self.header, queue_ready, 1);
439                 }
440             }
441         }
442     }
443 
queue_unset(&mut self, queue: u16)444     fn queue_unset(&mut self, queue: u16) {
445         match self.version {
446             MmioVersion::Legacy => {
447                 // Safe because self.header points to a valid VirtIO MMIO region.
448                 unsafe {
449                     volwrite!(self.header, queue_sel, queue.into());
450                     volwrite!(self.header, queue_num, 0);
451                     volwrite!(self.header, legacy_queue_align, 0);
452                     volwrite!(self.header, legacy_queue_pfn, 0);
453                 }
454             }
455             MmioVersion::Modern => {
456                 // Safe because self.header points to a valid VirtIO MMIO region.
457                 unsafe {
458                     volwrite!(self.header, queue_sel, queue.into());
459 
460                     volwrite!(self.header, queue_ready, 0);
461                     // Wait until we read the same value back, to ensure synchronisation (see 4.2.2.2).
462                     while volread!(self.header, queue_ready) != 0 {}
463 
464                     volwrite!(self.header, queue_num, 0);
465                     volwrite!(self.header, queue_desc_low, 0);
466                     volwrite!(self.header, queue_desc_high, 0);
467                     volwrite!(self.header, queue_driver_low, 0);
468                     volwrite!(self.header, queue_driver_high, 0);
469                     volwrite!(self.header, queue_device_low, 0);
470                     volwrite!(self.header, queue_device_high, 0);
471                 }
472             }
473         }
474     }
475 
queue_used(&mut self, queue: u16) -> bool476     fn queue_used(&mut self, queue: u16) -> bool {
477         // Safe because self.header points to a valid VirtIO MMIO region.
478         unsafe {
479             volwrite!(self.header, queue_sel, queue.into());
480             match self.version {
481                 MmioVersion::Legacy => volread!(self.header, legacy_queue_pfn) != 0,
482                 MmioVersion::Modern => volread!(self.header, queue_ready) != 0,
483             }
484         }
485     }
486 
ack_interrupt(&mut self) -> bool487     fn ack_interrupt(&mut self) -> bool {
488         // Safe because self.header points to a valid VirtIO MMIO region.
489         unsafe {
490             let interrupt = volread!(self.header, interrupt_status);
491             if interrupt != 0 {
492                 volwrite!(self.header, interrupt_ack, interrupt);
493                 true
494             } else {
495                 false
496             }
497         }
498     }
499 
read_config_generation(&self) -> u32500     fn read_config_generation(&self) -> u32 {
501         // SAFETY: self.header points to a valid VirtIO MMIO region.
502         unsafe { volread!(self.header, config_generation) }
503     }
504 
read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error>505     fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
506         assert!(align_of::<T>() <= 4,
507             "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
508             align_of::<T>());
509         assert!(offset % align_of::<T>() == 0);
510 
511         if self.config_space_size < offset + size_of::<T>() {
512             Err(Error::ConfigSpaceTooSmall)
513         } else {
514             // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was valid,
515             // which includes the config space.
516             unsafe {
517                 Ok(self
518                     .header
519                     .cast::<T>()
520                     .byte_add(CONFIG_SPACE_OFFSET)
521                     .byte_add(offset)
522                     .read_volatile())
523             }
524         }
525     }
526 
write_config_space<T: IntoBytes + Immutable>( &mut self, offset: usize, value: T, ) -> Result<(), Error>527     fn write_config_space<T: IntoBytes + Immutable>(
528         &mut self,
529         offset: usize,
530         value: T,
531     ) -> Result<(), Error> {
532         assert!(align_of::<T>() <= 4,
533             "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
534             align_of::<T>());
535         assert!(offset % align_of::<T>() == 0);
536 
537         if self.config_space_size < offset + size_of::<T>() {
538             Err(Error::ConfigSpaceTooSmall)
539         } else {
540             // SAFETY: The caller of `MmioTransport::new` guaranteed that the header pointer was valid,
541             // which includes the config space.
542             unsafe {
543                 self.header
544                     .cast::<T>()
545                     .byte_add(CONFIG_SPACE_OFFSET)
546                     .byte_add(offset)
547                     .write_volatile(value);
548             }
549             Ok(())
550         }
551     }
552 }
553 
impl Drop for MmioTransport {
    fn drop(&mut self) {
        // Reset the device when the transport is dropped (writing an empty status triggers a
        // device reset, per the `status` register documentation above), so the device stops
        // using any queue memory the driver may subsequently free.
        self.set_status(DeviceStatus::empty())
    }
}
560