#![deny(unsafe_op_in_unsafe_fn)]

use crate::hal::{BufferDirection, Dma, Hal, PhysAddr};
use crate::transport::Transport;
use crate::{align_up, nonnull_slice_from_raw_parts, pages, Error, Result, PAGE_SIZE};
use bitflags::bitflags;
#[cfg(test)]
use core::cmp::min;
use core::hint::spin_loop;
use core::mem::{size_of, take};
#[cfg(test)]
use core::ptr;
use core::ptr::NonNull;
use core::sync::atomic::{fence, Ordering};
use zerocopy::FromBytes;

/// The mechanism for bulk data transport on virtio devices.
///
/// Each device can have zero or more virtqueues.
///
/// * `SIZE`: The size of the queue. This is both the number of descriptors and the number of slots
///   in the available and used rings.
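///
/// For example, a 16-entry queue for a hypothetical HAL implementation `MyHal` would be declared
/// as `VirtQueue::<MyHal, 16>`.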
#[derive(Debug)]
pub struct VirtQueue<H: Hal, const SIZE: usize> {
    /// DMA guard
    layout: VirtQueueLayout<H>,
    /// Descriptor table
    ///
    /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
    /// trust values read back from it. Use `desc_shadow` instead to keep track of what we wrote to
    /// it.
    desc: NonNull<[Descriptor]>,
    /// Available ring
    ///
    /// The device may be able to modify this, even though it's not supposed to, so we shouldn't
    /// trust values read back from it. The only field we need to read currently is `idx`, so we
    /// have `avail_idx` below to use instead.
    avail: NonNull<AvailRing<SIZE>>,
    /// Used ring
    used: NonNull<UsedRing<SIZE>>,

    /// The index of the queue.
    queue_idx: u16,
    /// The number of descriptors currently in use.
    num_used: u16,
    /// The head desc index of the free list.
    free_head: u16,
    /// Our trusted copy of `desc` that the device can't access.
    desc_shadow: [Descriptor; SIZE],
    /// Our trusted copy of `avail.idx`.
    avail_idx: u16,
    last_used_idx: u16,
}

impl<H: Hal, const SIZE: usize> VirtQueue<H, SIZE> {
    /// Create a new VirtQueue.
    pub fn new<T: Transport>(transport: &mut T, idx: u16) -> Result<Self> {
        if transport.queue_used(idx) {
            return Err(Error::AlreadyUsed);
        }
        if !SIZE.is_power_of_two()
            || SIZE > u16::MAX.into()
            || transport.max_queue_size() < SIZE as u32
        {
            return Err(Error::InvalidParam);
        }
        let size = SIZE as u16;

        let layout = if transport.requires_legacy_layout() {
            VirtQueueLayout::allocate_legacy(size)?
        } else {
            VirtQueueLayout::allocate_flexible(size)?
        };

        transport.queue_set(
            idx,
            size.into(),
            layout.descriptors_paddr(),
            layout.driver_area_paddr(),
            layout.device_area_paddr(),
        );

        let desc =
            nonnull_slice_from_raw_parts(layout.descriptors_vaddr().cast::<Descriptor>(), SIZE);
        let avail = layout.avail_vaddr().cast();
        let used = layout.used_vaddr().cast();

        let mut desc_shadow: [Descriptor; SIZE] = FromBytes::new_zeroed();
        // Link descriptors together.
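        // Initially every descriptor is free, so the free list is simply the chain
        // 0 -> 1 -> ... -> SIZE - 1, with `free_head` starting at 0.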
        for i in 0..(size - 1) {
            desc_shadow[i as usize].next = i + 1;
            // Safe because `desc` is properly aligned, dereferenceable, initialised, and the device
            // won't access the descriptors for the duration of this unsafe block.
            unsafe {
                (*desc.as_ptr())[i as usize].next = i + 1;
            }
        }

        Ok(VirtQueue {
            layout,
            desc,
            avail,
            used,
            queue_idx: idx,
            num_used: 0,
            free_head: 0,
            desc_shadow,
            avail_idx: 0,
            last_used_idx: 0,
        })
    }

    /// Add buffers to the virtqueue, returning a token.
    ///
    /// Ref: linux virtio_ring.c virtqueue_add
    ///
    /// # Safety
    ///
    /// The input and output buffers must remain valid and not be accessed until a call to
    /// `pop_used` with the returned token succeeds.
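    ///
    /// # Example
    ///
    /// A minimal sketch of the intended calling pattern, assuming `queue`, `request` and
    /// `response` are set up elsewhere:
    ///
    /// ```ignore
    /// let token = unsafe { queue.add(&[&request], &mut [&mut response]) }?;
    /// // ... notify the device, then wait until `queue.can_pop()` returns true ...
    /// let len = unsafe { queue.pop_used(token, &[&request], &mut [&mut response]) }?;
    /// ```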
    pub unsafe fn add<'a, 'b>(
        &mut self,
        inputs: &'a [&'b [u8]],
        outputs: &'a mut [&'b mut [u8]],
    ) -> Result<u16> {
        if inputs.is_empty() && outputs.is_empty() {
            return Err(Error::InvalidParam);
        }
        if inputs.len() + outputs.len() + self.num_used as usize > SIZE {
            return Err(Error::QueueFull);
        }

        // allocate descriptors from free list
        let head = self.free_head;
        let mut last = self.free_head;

        for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
            // Write to desc_shadow then copy.
            let desc = &mut self.desc_shadow[usize::from(self.free_head)];
            // Safe because our caller promises that the buffers live at least until `pop_used`
            // returns them.
            unsafe {
                desc.set_buf::<H>(buffer, direction, DescFlags::NEXT);
            }
            last = self.free_head;
            self.free_head = desc.next;

            self.write_desc(last);
        }

        // set last_elem.next = NULL
        self.desc_shadow[usize::from(last)]
            .flags
            .remove(DescFlags::NEXT);
        self.write_desc(last);

        self.num_used += (inputs.len() + outputs.len()) as u16;

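        // SIZE is a power of two, so masking with SIZE - 1 is equivalent to taking the slot index
        // modulo the ring size.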
        let avail_slot = self.avail_idx & (SIZE as u16 - 1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).ring[avail_slot as usize] = head;
        }

        // Write barrier so that device sees changes to descriptor table and available ring before
        // change to available index.
        fence(Ordering::SeqCst);

        // increase head of avail ring
        self.avail_idx = self.avail_idx.wrapping_add(1);
        // Safe because self.avail is properly aligned, dereferenceable and initialised.
        unsafe {
            (*self.avail.as_ptr()).idx = self.avail_idx;
        }

        // Write barrier so that device can see change to available index after this method returns.
        fence(Ordering::SeqCst);

        Ok(head)
    }

    /// Adds the given buffers to the virtqueue, notifies the device, blocks until the device uses
    /// them, then pops them.
    ///
    /// This assumes that the device isn't processing any other buffers at the same time.
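    ///
    /// # Example
    ///
    /// A minimal usage sketch, assuming `queue`, `transport`, `request` and `response` have been
    /// set up elsewhere:
    ///
    /// ```ignore
    /// let len = queue.add_notify_wait_pop(&[&request], &mut [&mut response], &mut transport)?;
    /// ```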
    pub fn add_notify_wait_pop<'a>(
        &mut self,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
        transport: &mut impl Transport,
    ) -> Result<u32> {
        // Safe because we don't return until the same token has been popped, so the buffers remain
        // valid and are not otherwise accessed until then.
        let token = unsafe { self.add(inputs, outputs) }?;

        // Notify the queue.
        if self.should_notify() {
            transport.notify(self.queue_idx);
        }

        // Wait until there is at least one element in the used ring.
        while !self.can_pop() {
            spin_loop();
        }

        // Safe because these are the same buffers as we passed to `add` above and they are still
        // valid.
        unsafe { self.pop_used(token, inputs, outputs) }
    }

    /// Returns whether the driver should notify the device after adding a new buffer to the
    /// virtqueue.
    ///
    /// This will be false if the device has suppressed notifications.
    pub fn should_notify(&self) -> bool {
        // Read barrier, so we read a fresh value from the device.
        fence(Ordering::SeqCst);

        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
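        // Bit 0 of the used ring `flags` field is VIRTQ_USED_F_NO_NOTIFY; when it is set, the
        // device has asked not to be notified.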
        unsafe { (*self.used.as_ptr()).flags & 0x0001 == 0 }
    }

    /// Copies the descriptor at the given index from `desc_shadow` to `desc`, so it can be seen by
    /// the device.
    fn write_desc(&mut self, index: u16) {
        let index = usize::from(index);
        // Safe because self.desc is properly aligned, dereferenceable and initialised, and nothing
        // else reads or writes the descriptor during this block.
        unsafe {
            (*self.desc.as_ptr())[index] = self.desc_shadow[index].clone();
        }
    }

    /// Returns whether there is a used element that can be popped.
    pub fn can_pop(&self) -> bool {
        // Read barrier, so we read a fresh value from the device.
        fence(Ordering::SeqCst);

        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        self.last_used_idx != unsafe { (*self.used.as_ptr()).idx }
    }

    /// Returns the descriptor index (a.k.a. token) of the next used element without popping it, or
    /// `None` if the used ring is empty.
    pub fn peek_used(&self) -> Option<u16> {
        if self.can_pop() {
            let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
            // Safe because self.used points to a valid, aligned, initialised, dereferenceable,
            // readable instance of UsedRing.
            Some(unsafe { (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16 })
        } else {
            None
        }
    }

    /// Returns the number of free descriptors.
    pub fn available_desc(&self) -> usize {
        SIZE - self.num_used as usize
    }

    /// Unshares buffers in the list starting at descriptor index `head` and adds them to the free
    /// list. Unsharing may involve copying data back to the original buffers, so they must be
    /// passed in too.
    ///
    /// This will push all linked descriptors at the front of the free list.
    ///
    /// # Safety
    ///
    /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
    /// queue by `add`.
    unsafe fn recycle_descriptors<'a>(
        &mut self,
        head: u16,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
    ) {
        let original_free_head = self.free_head;
        self.free_head = head;
        let mut next = Some(head);

        for (buffer, direction) in InputOutputIter::new(inputs, outputs) {
            let desc_index = next.expect("Descriptor chain was shorter than expected.");
            let desc = &mut self.desc_shadow[usize::from(desc_index)];

            let paddr = desc.addr;
            desc.unset_buf();
            self.num_used -= 1;
            next = desc.next();
            if next.is_none() {
                desc.next = original_free_head;
            }

            self.write_desc(desc_index);

            // Safe because the caller ensures that the buffer is valid and matches the descriptor
            // from which we got `paddr`.
            unsafe {
                // Unshare the buffer (and perhaps copy its contents back to the original buffer).
                H::unshare(paddr as usize, buffer, direction);
            }
        }

        if next.is_some() {
            panic!("Descriptor chain was longer than expected.");
        }
    }

    /// If the given token is next on the device used queue, pops it and returns the total buffer
    /// length which was used (written) by the device.
    ///
    /// Ref: linux virtio_ring.c virtqueue_get_buf_ctx
    ///
    /// # Safety
    ///
    /// The buffers in `inputs` and `outputs` must match the set of buffers originally added to the
    /// queue by `add` when it returned the token being passed in here.
    pub unsafe fn pop_used<'a>(
        &mut self,
        token: u16,
        inputs: &'a [&'a [u8]],
        outputs: &'a mut [&'a mut [u8]],
    ) -> Result<u32> {
        if !self.can_pop() {
            return Err(Error::NotReady);
        }
        // Read barrier not necessary, as can_pop already has one.

        // Get the index of the start of the descriptor chain for the next element in the used ring.
        let last_used_slot = self.last_used_idx & (SIZE as u16 - 1);
        let index;
        let len;
        // Safe because self.used points to a valid, aligned, initialised, dereferenceable, readable
        // instance of UsedRing.
        unsafe {
            index = (*self.used.as_ptr()).ring[last_used_slot as usize].id as u16;
            len = (*self.used.as_ptr()).ring[last_used_slot as usize].len;
        }

        if index != token {
            // The device used a different descriptor chain to the one we were expecting.
            return Err(Error::WrongToken);
        }

        // Safe because the caller ensures the buffers are valid and match the descriptor.
        unsafe {
            self.recycle_descriptors(index, inputs, outputs);
        }
        self.last_used_idx = self.last_used_idx.wrapping_add(1);

        Ok(len)
    }
}

/// The inner layout of a VirtQueue.
///
/// Ref: 2.6 Split Virtqueues
#[derive(Debug)]
enum VirtQueueLayout<H: Hal> {
    Legacy {
        dma: Dma<H>,
        avail_offset: usize,
        used_offset: usize,
    },
    Modern {
        /// The region used for the descriptor area and driver area.
        driver_to_device_dma: Dma<H>,
        /// The region used for the device area.
        device_to_driver_dma: Dma<H>,
        /// The offset from the start of the `driver_to_device_dma` region to the driver area
        /// (available ring).
        avail_offset: usize,
    },
}

impl<H: Hal> VirtQueueLayout<H> {
    /// Allocates a single DMA region containing all parts of the virtqueue, following the layout
    /// required by legacy interfaces.
    ///
    /// Ref: 2.6.2 Legacy Interfaces: A Note on Virtqueue Layout
    fn allocate_legacy(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
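        // The legacy layout requires the used ring to start at the next page boundary after the
        // descriptor table and available ring, hence the `align_up` calls below.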
        let size = align_up(desc + avail) + align_up(used);
        // Allocate contiguous pages.
        let dma = Dma::new(size / PAGE_SIZE, BufferDirection::Both)?;
        Ok(Self::Legacy {
            dma,
            avail_offset: desc,
            used_offset: align_up(desc + avail),
        })
    }

    /// Allocates separate DMA regions for the different parts of the virtqueue, as supported by
    /// non-legacy interfaces.
    ///
    /// This is preferred over `allocate_legacy` where possible as it reduces memory fragmentation
    /// and allows the HAL to know which DMA regions are used in which direction.
    fn allocate_flexible(queue_size: u16) -> Result<Self> {
        let (desc, avail, used) = queue_part_sizes(queue_size);
        let driver_to_device_dma = Dma::new(pages(desc + avail), BufferDirection::DriverToDevice)?;
        let device_to_driver_dma = Dma::new(pages(used), BufferDirection::DeviceToDriver)?;
        Ok(Self::Modern {
            driver_to_device_dma,
            device_to_driver_dma,
            avail_offset: desc,
        })
    }

    /// Returns the physical address of the descriptor area.
    fn descriptors_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy { dma, .. } => dma.paddr(),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.paddr(),
        }
    }

    /// Returns a pointer to the descriptor table (in the descriptor area).
    fn descriptors_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy { dma, .. } => dma.vaddr(0),
            Self::Modern {
                driver_to_device_dma,
                ..
            } => driver_to_device_dma.vaddr(0),
        }
    }

    /// Returns the physical address of the driver area.
    fn driver_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.paddr() + avail_offset,
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.paddr() + avail_offset,
        }
    }

    /// Returns a pointer to the available ring (in the driver area).
    fn avail_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, avail_offset, ..
            } => dma.vaddr(*avail_offset),
            Self::Modern {
                driver_to_device_dma,
                avail_offset,
                ..
            } => driver_to_device_dma.vaddr(*avail_offset),
        }
    }

    /// Returns the physical address of the device area.
    fn device_area_paddr(&self) -> PhysAddr {
        match self {
            Self::Legacy {
                used_offset, dma, ..
            } => dma.paddr() + used_offset,
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.paddr(),
        }
    }

    /// Returns a pointer to the used ring (in the device area).
    fn used_vaddr(&self) -> NonNull<u8> {
        match self {
            Self::Legacy {
                dma, used_offset, ..
            } => dma.vaddr(*used_offset),
            Self::Modern {
                device_to_driver_dma,
                ..
            } => device_to_driver_dma.vaddr(0),
        }
    }
}

/// Returns the size in bytes of the descriptor table, available ring and used ring for a given
/// queue size.
///
/// Ref: 2.6 Split Virtqueues
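///
/// For example, with `queue_size = 4` the descriptor table is 4 * 16 = 64 bytes, the available
/// ring is 2 * (3 + 4) = 14 bytes and the used ring is 3 * 2 + 4 * 8 = 38 bytes.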
fn queue_part_sizes(queue_size: u16) -> (usize, usize, usize) {
    assert!(
        queue_size.is_power_of_two(),
        "queue size should be a power of 2"
    );
    let queue_size = queue_size as usize;
    let desc = size_of::<Descriptor>() * queue_size;
    let avail = size_of::<u16>() * (3 + queue_size);
    let used = size_of::<u16>() * 3 + size_of::<UsedElem>() * queue_size;
    (desc, avail, used)
}

#[repr(C, align(16))]
#[derive(Clone, Debug, FromBytes)]
pub(crate) struct Descriptor {
    addr: u64,
    len: u32,
    flags: DescFlags,
    next: u16,
}

impl Descriptor {
    /// Sets the buffer address, length and flags, and shares it with the device.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the buffer lives at least as long as the descriptor is active.
    unsafe fn set_buf<H: Hal>(
        &mut self,
        buf: NonNull<[u8]>,
        direction: BufferDirection,
        extra_flags: DescFlags,
    ) {
        // Safe because our caller promises that the buffer is valid.
        unsafe {
            self.addr = H::share(buf, direction) as u64;
        }
        self.len = buf.len() as u32;
        self.flags = extra_flags
            | match direction {
                BufferDirection::DeviceToDriver => DescFlags::WRITE,
                BufferDirection::DriverToDevice => DescFlags::empty(),
                BufferDirection::Both => {
                    panic!("Buffer passed to device should never use BufferDirection::Both.")
                }
            };
    }

    /// Sets the buffer address and length to 0.
    ///
    /// This must only be called once the device has finished using the descriptor.
    fn unset_buf(&mut self) {
        self.addr = 0;
        self.len = 0;
    }

    /// Returns the index of the next descriptor in the chain if the `NEXT` flag is set, or `None`
    /// if it is not (and thus this descriptor is the end of the chain).
    fn next(&self) -> Option<u16> {
        if self.flags.contains(DescFlags::NEXT) {
            Some(self.next)
        } else {
            None
        }
    }
}

bitflags! {
    /// Descriptor flags
    #[derive(FromBytes)]
    struct DescFlags: u16 {
        const NEXT = 1;
        const WRITE = 2;
        const INDIRECT = 4;
    }
}

/// The driver uses the available ring to offer buffers to the device:
/// each ring entry refers to the head of a descriptor chain.
/// It is only written by the driver and read by the device.
#[repr(C)]
#[derive(Debug)]
struct AvailRing<const SIZE: usize> {
    flags: u16,
    /// A driver MUST NOT decrement the idx.
    idx: u16,
    ring: [u16; SIZE],
    used_event: u16, // unused
}

/// The used ring is where the device returns buffers once it is done with them:
/// it is only written to by the device, and read by the driver.
#[repr(C)]
#[derive(Debug)]
struct UsedRing<const SIZE: usize> {
    flags: u16,
    idx: u16,
    ring: [UsedElem; SIZE],
    avail_event: u16, // unused
}

#[repr(C)]
#[derive(Debug)]
struct UsedElem {
    id: u32,
    len: u32,
}

struct InputOutputIter<'a, 'b> {
    inputs: &'a [&'b [u8]],
    outputs: &'a mut [&'b mut [u8]],
}

impl<'a, 'b> InputOutputIter<'a, 'b> {
    fn new(inputs: &'a [&'b [u8]], outputs: &'a mut [&'b mut [u8]]) -> Self {
        Self { inputs, outputs }
    }
}

impl<'a, 'b> Iterator for InputOutputIter<'a, 'b> {
    type Item = (NonNull<[u8]>, BufferDirection);

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(input) = take_first(&mut self.inputs) {
            Some(((*input).into(), BufferDirection::DriverToDevice))
        } else {
            let output = take_first_mut(&mut self.outputs)?;
            Some(((*output).into(), BufferDirection::DeviceToDriver))
        }
    }
}

// TODO: Use `slice::take_first` once it is stable
// (https://github.com/rust-lang/rust/issues/62280).
fn take_first<'a, T>(slice: &mut &'a [T]) -> Option<&'a T> {
    let (first, rem) = slice.split_first()?;
    *slice = rem;
    Some(first)
}

// TODO: Use `slice::take_first_mut` once it is stable
// (https://github.com/rust-lang/rust/issues/62280).
fn take_first_mut<'a, T>(slice: &mut &'a mut [T]) -> Option<&'a mut T> {
    let (first, rem) = take(slice).split_first_mut()?;
    *slice = rem;
    Some(first)
}

/// Simulates the device reading from a VirtIO queue and writing a response back, for use in tests.
///
/// The fake device always uses descriptors in order.
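///
/// It reads all device-readable descriptors in the chain, passes their concatenated contents to
/// `handler`, and writes the bytes returned by `handler` into the device-writable descriptors.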
#[cfg(test)]
pub(crate) fn fake_read_write_queue<const QUEUE_SIZE: usize>(
    descriptors: *const [Descriptor; QUEUE_SIZE],
    queue_driver_area: *const u8,
    queue_device_area: *mut u8,
    handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
) {
    use core::{ops::Deref, slice};

    let available_ring = queue_driver_area as *const AvailRing<QUEUE_SIZE>;
    let used_ring = queue_device_area as *mut UsedRing<QUEUE_SIZE>;

    // Safe because the various pointers are properly aligned, dereferenceable, initialised, and
    // nothing else accesses them during this block.
    unsafe {
        // Make sure there is actually at least one descriptor available to read from.
        assert_ne!((*available_ring).idx, (*used_ring).idx);
        // The fake device always uses descriptors in order, like VIRTIO_F_IN_ORDER, so
        // `used_ring.idx` marks the next descriptor we should take from the available ring.
        let next_slot = (*used_ring).idx & (QUEUE_SIZE as u16 - 1);
        let head_descriptor_index = (*available_ring).ring[next_slot as usize];
        let mut descriptor = &(*descriptors)[head_descriptor_index as usize];

        // Loop through all input descriptors in the chain, reading data from them.
        let mut input = Vec::new();
        while !descriptor.flags.contains(DescFlags::WRITE) {
            input.extend_from_slice(slice::from_raw_parts(
                descriptor.addr as *const u8,
                descriptor.len as usize,
            ));

            if let Some(next) = descriptor.next() {
                descriptor = &(*descriptors)[next as usize];
            } else {
                break;
            }
        }
        let input_length = input.len();

        // Let the test handle the request.
        let output = handler(input);

        // Write the response to the remaining descriptors.
        let mut remaining_output = output.deref();
        if descriptor.flags.contains(DescFlags::WRITE) {
            loop {
                assert!(descriptor.flags.contains(DescFlags::WRITE));

                let length_to_write = min(remaining_output.len(), descriptor.len as usize);
                ptr::copy(
                    remaining_output.as_ptr(),
                    descriptor.addr as *mut u8,
                    length_to_write,
                );
                remaining_output = &remaining_output[length_to_write..];

                if let Some(next) = descriptor.next() {
                    descriptor = &(*descriptors)[next as usize];
                } else {
                    break;
                }
            }
        }
        assert_eq!(remaining_output.len(), 0);

        // Mark the buffer as used.
        (*used_ring).ring[next_slot as usize].id = head_descriptor_index as u32;
        (*used_ring).ring[next_slot as usize].len = (input_length + output.len()) as u32;
        (*used_ring).idx += 1;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::mmio::{MmioTransport, VirtIOHeader, MODERN_VERSION},
    };
    use core::ptr::NonNull;

    #[test]
    fn invalid_queue_size() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        // Size not a power of 2.
        assert_eq!(
            VirtQueue::<FakeHal, 3>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_too_big() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 8>::new(&mut transport, 0).unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn queue_already_used() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap_err(),
            Error::AlreadyUsed
        );
    }

    #[test]
    fn add_empty() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(
            unsafe { queue.add(&[], &mut []) }.unwrap_err(),
            Error::InvalidParam
        );
    }

    #[test]
    fn add_too_many() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);
        assert_eq!(
            unsafe { queue.add(&[&[], &[], &[]], &mut [&mut [], &mut []]) }.unwrap_err(),
            Error::QueueFull
        );
    }

    #[test]
    fn add_buffers() {
        let mut header = VirtIOHeader::make_fake_header(MODERN_VERSION, 1, 0, 0, 4);
        let mut transport = unsafe { MmioTransport::new(NonNull::from(&mut header)) }.unwrap();
        let mut queue = VirtQueue::<FakeHal, 4>::new(&mut transport, 0).unwrap();
        assert_eq!(queue.available_desc(), 4);

        // Add a buffer chain consisting of two device-readable parts followed by two
        // device-writable parts.
        let token = unsafe { queue.add(&[&[1, 2], &[3]], &mut [&mut [0, 0], &mut [0]]) }.unwrap();

        assert_eq!(queue.available_desc(), 0);
        assert!(!queue.can_pop());

        // Safe because the various parts of the queue are properly aligned, dereferenceable and
        // initialised, and nothing else is accessing them at the same time.
        unsafe {
            let first_descriptor_index = (*queue.avail.as_ptr()).ring[0];
            assert_eq!(first_descriptor_index, token);
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[first_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let second_descriptor_index =
                (*queue.desc.as_ptr())[first_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[second_descriptor_index as usize].flags,
                DescFlags::NEXT
            );
            let third_descriptor_index =
                (*queue.desc.as_ptr())[second_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].len,
                2
            );
            assert_eq!(
                (*queue.desc.as_ptr())[third_descriptor_index as usize].flags,
                DescFlags::NEXT | DescFlags::WRITE
            );
            let fourth_descriptor_index =
                (*queue.desc.as_ptr())[third_descriptor_index as usize].next;
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].len,
                1
            );
            assert_eq!(
                (*queue.desc.as_ptr())[fourth_descriptor_index as usize].flags,
                DescFlags::WRITE
            );
        }
    }
}