1 //! Driver for VirtIO block devices.
2 
3 use crate::config::{read_config, ReadOnly};
4 use crate::hal::Hal;
5 use crate::queue::VirtQueue;
6 use crate::transport::Transport;
7 use crate::{Error, Result};
8 use bitflags::bitflags;
9 use log::info;
10 use zerocopy::{FromBytes, Immutable, IntoBytes, KnownLayout};
11 
/// Index of the single request virtqueue used by the block device.
const QUEUE: u16 = 0;
/// Number of descriptors in the virtqueue.
const QUEUE_SIZE: u16 = 16;
/// The feature bits this driver is willing to negotiate during `begin_init`.
const SUPPORTED_FEATURES: BlkFeature = BlkFeature::RO
    .union(BlkFeature::FLUSH)
    .union(BlkFeature::RING_INDIRECT_DESC)
    .union(BlkFeature::RING_EVENT_IDX);
18 
/// Driver for a VirtIO block device.
///
/// This is a simple virtual block device, e.g. disk.
///
/// Read and write requests (and other exotic requests) are placed in the queue and serviced
/// (probably out of order) by the device except where noted.
///
/// # Example
///
/// ```
/// # use virtio_drivers_and_devices::{Error, Hal};
/// # use virtio_drivers_and_devices::transport::Transport;
/// use virtio_drivers_and_devices::device::blk::{VirtIOBlk, SECTOR_SIZE};
///
/// # fn example<HalImpl: Hal, T: Transport>(transport: T) -> Result<(), Error> {
/// let mut disk = VirtIOBlk::<HalImpl, _>::new(transport)?;
///
/// println!("VirtIO block device: {} kB", disk.capacity() * SECTOR_SIZE as u64 / 1024);
///
/// // Read sector 0 and then copy it to sector 1.
/// let mut buf = [0; SECTOR_SIZE];
/// disk.read_blocks(0, &mut buf)?;
/// disk.write_blocks(1, &buf)?;
/// # Ok(())
/// # }
/// ```
pub struct VirtIOBlk<H: Hal, T: Transport> {
    // Transport used to talk to the device (MMIO or PCI).
    transport: T,
    // The single request/response virtqueue shared by all operations.
    queue: VirtQueue<H, { QUEUE_SIZE as usize }>,
    // Device capacity in 512-byte sectors, read from config space at init.
    capacity: u64,
    // Features accepted by both driver and device during negotiation.
    negotiated_features: BlkFeature,
}
51 
impl<H: Hal, T: Transport> VirtIOBlk<H, T> {
    /// Create a new VirtIO-Blk driver.
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);

        // Read configuration space. The capacity (in 512-byte sectors) is exposed as two
        // 32-bit halves; `read_consistent` retries the closure if the device modifies the
        // config space while we are reading it.
        let capacity = transport.read_consistent(|| {
            Ok(read_config!(transport, BlkConfig, capacity_low)? as u64
                | (read_config!(transport, BlkConfig, capacity_high)? as u64) << 32)
        })?;
        // One 512-byte sector is half a KiB, hence the division by 2.
        info!("found a block device of size {}KB", capacity / 2);

        let queue = VirtQueue::new(
            &mut transport,
            QUEUE,
            negotiated_features.contains(BlkFeature::RING_INDIRECT_DESC),
            negotiated_features.contains(BlkFeature::RING_EVENT_IDX),
        )?;
        transport.finish_init();

        Ok(VirtIOBlk {
            transport,
            queue,
            capacity,
            negotiated_features,
        })
    }

    /// Gets the capacity of the block device, in 512 byte ([`SECTOR_SIZE`]) sectors.
    pub fn capacity(&self) -> u64 {
        self.capacity
    }

    /// Returns true if the block device is read-only, or false if it allows writes.
    pub fn readonly(&self) -> bool {
        self.negotiated_features.contains(BlkFeature::RO)
    }

    /// Acknowledges a pending interrupt, if any.
    ///
    /// Returns true if there was an interrupt to acknowledge.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Enables interrupts from the device.
    pub fn enable_interrupts(&mut self) {
        self.queue.set_dev_notify(true);
    }

    /// Disables interrupts from the device.
    pub fn disable_interrupts(&mut self) {
        self.queue.set_dev_notify(false);
    }

    /// Sends the given request to the device and waits for a response, with no extra data.
    fn request(&mut self, request: BlkReq) -> Result {
        let mut resp = BlkResp::default();
        // The request is device-readable; only the status byte is device-writable.
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends the given request to the device and waits for a response, including the given data.
    fn request_read(&mut self, request: BlkReq, data: &mut [u8]) -> Result {
        let mut resp = BlkResp::default();
        // The device writes both the payload and the trailing status byte, so both go in
        // the device-writable buffer list.
        self.queue.add_notify_wait_pop(
            &[request.as_bytes()],
            &mut [data, resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Sends the given request and data to the device and waits for a response.
    fn request_write(&mut self, request: BlkReq, data: &[u8]) -> Result {
        let mut resp = BlkResp::default();
        // The payload is device-readable here; only the status byte is written back.
        self.queue.add_notify_wait_pop(
            &[request.as_bytes(), data],
            &mut [resp.as_mut_bytes()],
            &mut self.transport,
        )?;
        resp.status.into()
    }

    /// Requests the device to flush any pending writes to storage.
    ///
    /// This will be ignored if the device doesn't support the `VIRTIO_BLK_F_FLUSH` feature.
    pub fn flush(&mut self) -> Result {
        if self.negotiated_features.contains(BlkFeature::FLUSH) {
            self.request(BlkReq {
                type_: ReqType::Flush,
                ..Default::default()
            })
        } else {
            Ok(())
        }
    }

    /// Gets the device ID.
    ///
    /// The ID is written as ASCII into the given buffer, which must be 20 bytes long, and the used
    /// length returned.
    pub fn device_id(&mut self, id: &mut [u8; 20]) -> Result<usize> {
        self.request_read(
            BlkReq {
                type_: ReqType::GetId,
                ..Default::default()
            },
            id,
        )?;

        // The ID may be NUL-terminated; report only the bytes before the first NUL.
        let length = id.iter().position(|&x| x == 0).unwrap_or(20);
        Ok(length)
    }

    /// Reads one or more blocks into the given buffer.
    ///
    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
    ///
    /// Blocks until the read completes or there is an error.
    pub fn read_blocks(&mut self, block_id: usize, buf: &mut [u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_read(
            BlkReq {
                type_: ReqType::In,
                reserved: 0,
                sector: block_id as u64,
            },
            buf,
        )
    }

    /// Submits a request to read one or more blocks, but returns immediately without waiting for
    /// the read to complete.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the first block to read.
    /// * `req` - A buffer which the driver can use for the request to send to the device. The
    ///   contents don't matter as `read_blocks_nb` will initialise it, but like the other buffers
    ///   it needs to be valid (and not otherwise used) until the corresponding
    ///   `complete_read_blocks` call. Its length must be a non-zero multiple of [`SECTOR_SIZE`].
    /// * `buf` - The buffer in memory into which the block should be read.
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   to contain the status of the request. The caller can safely
    ///   read the variable only after the request is complete.
    ///
    /// # Usage
    ///
    /// It will submit request to the VirtIO block device and return a token identifying
    /// the position of the first Descriptor in the chain. If there are not enough
    /// Descriptors to allocate, then it returns [`Error::QueueFull`].
    ///
    /// The caller can then call `peek_used` with the returned token to check whether the device has
    /// finished handling the request. Once it has, the caller must call `complete_read_blocks` with
    /// the same buffers before reading the response.
    ///
    /// ```
    /// # use virtio_drivers_and_devices::{Error, Hal};
    /// # use virtio_drivers_and_devices::device::blk::VirtIOBlk;
    /// # use virtio_drivers_and_devices::transport::Transport;
    /// use virtio_drivers_and_devices::device::blk::{BlkReq, BlkResp, RespStatus};
    ///
    /// # fn example<H: Hal, T: Transport>(blk: &mut VirtIOBlk<H, T>) -> Result<(), Error> {
    /// let mut request = BlkReq::default();
    /// let mut buffer = [0; 512];
    /// let mut response = BlkResp::default();
    /// let token = unsafe { blk.read_blocks_nb(42, &mut request, &mut buffer, &mut response) }?;
    ///
    /// // Wait for an interrupt to tell us that the request completed...
    /// assert_eq!(blk.peek_used(), Some(token));
    ///
    /// unsafe {
    ///   blk.complete_read_blocks(token, &request, &mut buffer, &mut response)?;
    /// }
    /// if response.status() == RespStatus::OK {
    ///   println!("Successfully read block.");
    /// } else {
    ///   println!("Error {:?} reading block.", response.status());
    /// }
    /// # Ok(())
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// `req`, `buf` and `resp` are still borrowed by the underlying VirtIO block device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee that they are not
    /// accessed before the request is completed in order to avoid data races.
    pub unsafe fn read_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::In,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        // Only notify if the device actually wants a notification (event index / flags).
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a read operation which was started by `read_blocks_nb`.
    ///
    /// # Safety
    ///
    /// The same buffers must be passed in again as were passed to `read_blocks_nb` when it returned
    /// the token.
    pub unsafe fn complete_read_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &mut [u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes()], &mut [buf, resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Writes the contents of the given buffer to a block or blocks.
    ///
    /// The buffer length must be a non-zero multiple of [`SECTOR_SIZE`].
    ///
    /// Blocks until the write is complete or there is an error.
    pub fn write_blocks(&mut self, block_id: usize, buf: &[u8]) -> Result {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        self.request_write(
            BlkReq {
                type_: ReqType::Out,
                sector: block_id as u64,
                ..Default::default()
            },
            buf,
        )
    }

    /// Submits a request to write one or more blocks, but returns immediately without waiting for
    /// the write to complete.
    ///
    /// # Arguments
    ///
    /// * `block_id` - The identifier of the first block to write.
    /// * `req` - A buffer which the driver can use for the request to send to the device. The
    ///   contents don't matter as `write_blocks_nb` will initialise it, but like the other buffers
    ///   it needs to be valid (and not otherwise used) until the corresponding
    ///   `complete_write_blocks` call.
    /// * `buf` - The buffer in memory containing the data to write to the blocks. Its length must
    ///   be a non-zero multiple of [`SECTOR_SIZE`].
    /// * `resp` - A mutable reference to a variable provided by the caller
    ///   to contain the status of the request. The caller can safely
    ///   read the variable only after the request is complete.
    ///
    /// # Usage
    ///
    /// See [VirtIOBlk::read_blocks_nb].
    ///
    /// # Safety
    ///
    /// See [VirtIOBlk::read_blocks_nb].
    pub unsafe fn write_blocks_nb(
        &mut self,
        block_id: usize,
        req: &mut BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<u16> {
        assert_ne!(buf.len(), 0);
        assert_eq!(buf.len() % SECTOR_SIZE, 0);
        *req = BlkReq {
            type_: ReqType::Out,
            reserved: 0,
            sector: block_id as u64,
        };
        let token = self
            .queue
            .add(&[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        // Only notify if the device actually wants a notification (event index / flags).
        if self.queue.should_notify() {
            self.transport.notify(QUEUE);
        }
        Ok(token)
    }

    /// Completes a write operation which was started by `write_blocks_nb`.
    ///
    /// # Safety
    ///
    /// The same buffers must be passed in again as were passed to `write_blocks_nb` when it
    /// returned the token.
    pub unsafe fn complete_write_blocks(
        &mut self,
        token: u16,
        req: &BlkReq,
        buf: &[u8],
        resp: &mut BlkResp,
    ) -> Result<()> {
        self.queue
            .pop_used(token, &[req.as_bytes(), buf], &mut [resp.as_mut_bytes()])?;
        resp.status.into()
    }

    /// Fetches the token of the next completed request from the used ring and returns it, without
    /// removing it from the used ring. If there are no pending completed requests returns `None`.
    pub fn peek_used(&mut self) -> Option<u16> {
        self.queue.peek_used()
    }

    /// Returns the size of the device's VirtQueue.
    ///
    /// This can be used to tell the caller how many channels to monitor on.
    pub fn virt_queue_size(&self) -> u16 {
        QUEUE_SIZE
    }
}
382 
impl<H: Hal, T: Transport> Drop for VirtIOBlk<H, T> {
    fn drop(&mut self) {
        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
        // after they have been freed. The queue's own DMA memory is released when `self.queue`
        // is dropped after this runs.
        self.transport.queue_unset(QUEUE);
    }
}
390 
/// Layout of the device-specific configuration space for a block device.
///
/// Fields after `capacity_high` are only meaningful when the corresponding feature bit
/// (e.g. `SIZE_MAX`, `SEG_MAX`, `GEOMETRY`, `BLK_SIZE`, `TOPOLOGY`) has been negotiated.
#[derive(FromBytes, Immutable, IntoBytes)]
#[repr(C)]
struct BlkConfig {
    /// Number of 512 Bytes sectors
    capacity_low: ReadOnly<u32>,
    capacity_high: ReadOnly<u32>,
    size_max: ReadOnly<u32>,
    seg_max: ReadOnly<u32>,
    cylinders: ReadOnly<u16>,
    heads: ReadOnly<u8>,
    sectors: ReadOnly<u8>,
    blk_size: ReadOnly<u32>,
    physical_block_exp: ReadOnly<u8>,
    alignment_offset: ReadOnly<u8>,
    min_io_size: ReadOnly<u16>,
    opt_io_size: ReadOnly<u32>,
    // ... ignored
}
409 
/// A VirtIO block device request.
#[repr(C)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
pub struct BlkReq {
    // Type of the request (read, write, flush, ...).
    type_: ReqType,
    // Reserved/padding field; always 0.
    reserved: u32,
    // First 512-byte sector the request applies to.
    sector: u64,
}
418 
419 impl Default for BlkReq {
default() -> Self420     fn default() -> Self {
421         Self {
422             type_: ReqType::In,
423             reserved: 0,
424             sector: 0,
425         }
426     }
427 }
428 
/// Response of a VirtIOBlk request.
#[repr(C)]
#[derive(Debug, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct BlkResp {
    // Status byte written by the device once the request completes.
    status: RespStatus,
}
435 
impl BlkResp {
    /// Return the status of a VirtIOBlk request.
    ///
    /// Only meaningful after the corresponding request has completed; before that it holds
    /// the initial value set by the caller (normally [`RespStatus::NOT_READY`]).
    pub fn status(&self) -> RespStatus {
        self.status
    }
}
442 
/// Request type codes, as defined by the VirtIO block device specification.
#[repr(u32)]
#[derive(Debug, Immutable, IntoBytes, KnownLayout)]
enum ReqType {
    /// Read sectors from the device.
    In = 0,
    /// Write sectors to the device.
    Out = 1,
    /// Flush the device's write cache.
    Flush = 4,
    /// Fetch the device ID string.
    GetId = 8,
    /// Query storage lifetime information.
    GetLifetime = 10,
    /// Discard (trim) sectors.
    Discard = 11,
    /// Write zeroes to sectors.
    WriteZeroes = 13,
    /// Securely erase sectors.
    SecureErase = 14,
}
455 
/// Status of a VirtIOBlk request.
///
/// Newtype over the raw status byte written by the device, so unknown values are representable.
#[repr(transparent)]
#[derive(Copy, Clone, Debug, Eq, FromBytes, Immutable, IntoBytes, KnownLayout, PartialEq)]
pub struct RespStatus(u8);
460 
impl RespStatus {
    /// The request completed successfully.
    pub const OK: RespStatus = RespStatus(0);
    /// The request failed with an I/O error.
    pub const IO_ERR: RespStatus = RespStatus(1);
    /// The request type is not supported by the device.
    pub const UNSUPPORTED: RespStatus = RespStatus(2);
    /// Not ready: the device has not yet written a status for this request.
    pub const NOT_READY: RespStatus = RespStatus(3);
}
471 
472 impl From<RespStatus> for Result {
from(status: RespStatus) -> Self473     fn from(status: RespStatus) -> Self {
474         match status {
475             RespStatus::OK => Ok(()),
476             RespStatus::IO_ERR => Err(Error::IoError),
477             RespStatus::UNSUPPORTED => Err(Error::Unsupported),
478             RespStatus::NOT_READY => Err(Error::NotReady),
479             _ => Err(Error::IoError),
480         }
481     }
482 }
483 
484 impl Default for BlkResp {
default() -> Self485     fn default() -> Self {
486         BlkResp {
487             status: RespStatus::NOT_READY,
488         }
489     }
490 }
491 
/// The standard sector size of a VirtIO block device, in bytes. Data is read and written in
/// multiples of this size.
pub const SECTOR_SIZE: usize = 512;
495 
bitflags! {
    /// Feature bits for the VirtIO block device, combining device-specific bits (low word)
    /// with the device-independent VirtIO feature bits (bit 24 and above).
    #[derive(Copy, Clone, Debug, Default, Eq, PartialEq)]
    struct BlkFeature: u64 {
        /// Device supports request barriers. (legacy)
        const BARRIER       = 1 << 0;
        /// Maximum size of any single segment is in `size_max`.
        const SIZE_MAX      = 1 << 1;
        /// Maximum number of segments in a request is in `seg_max`.
        const SEG_MAX       = 1 << 2;
        /// Disk-style geometry specified in geometry.
        const GEOMETRY      = 1 << 4;
        /// Device is read-only.
        const RO            = 1 << 5;
        /// Block size of disk is in `blk_size`.
        const BLK_SIZE      = 1 << 6;
        /// Device supports scsi packet commands. (legacy)
        const SCSI          = 1 << 7;
        /// Cache flush command support.
        const FLUSH         = 1 << 9;
        /// Device exports information on optimal I/O alignment.
        const TOPOLOGY      = 1 << 10;
        /// Device can toggle its cache between writeback and writethrough modes.
        const CONFIG_WCE    = 1 << 11;
        /// Device supports multiqueue.
        const MQ            = 1 << 12;
        /// Device can support discard command, maximum discard sectors size in
        /// `max_discard_sectors` and maximum discard segment number in
        /// `max_discard_seg`.
        const DISCARD       = 1 << 13;
        /// Device can support write zeroes command, maximum write zeroes sectors
        /// size in `max_write_zeroes_sectors` and maximum write zeroes segment
        /// number in `max_write_zeroes_seg`.
        const WRITE_ZEROES  = 1 << 14;
        /// Device supports providing storage lifetime information.
        const LIFETIME      = 1 << 15;
        /// Device can support the secure erase command.
        const SECURE_ERASE  = 1 << 16;

        // device independent
        const NOTIFY_ON_EMPTY       = 1 << 24; // legacy
        const ANY_LAYOUT            = 1 << 27; // legacy
        const RING_INDIRECT_DESC    = 1 << 28;
        const RING_EVENT_IDX        = 1 << 29;
        const UNUSED                = 1 << 30; // legacy
        const VERSION_1             = 1 << 32; // detect legacy

        // the following since virtio v1.1
        const ACCESS_PLATFORM       = 1 << 33;
        const RING_PACKED           = 1 << 34;
        const IN_ORDER              = 1 << 35;
        const ORDER_PLATFORM        = 1 << 36;
        const SR_IOV                = 1 << 37;
        const NOTIFICATION_DATA     = 1 << 38;
    }
}
551 
#[cfg(test)]
mod tests {
    // These tests drive the block driver against a `FakeTransport`. Each test spawns a
    // thread that plays the role of the device: it waits for a queue notification, checks
    // the request bytes the driver submitted, and writes back a response.
    use super::*;
    use crate::{
        hal::fake::FakeHal,
        transport::{
            fake::{FakeTransport, QueueStatus, State},
            DeviceType,
        },
    };
    use alloc::{sync::Arc, vec};
    use core::mem::size_of;
    use std::{sync::Mutex, thread};

    /// Capacity is assembled from the two 32-bit config halves, and RO is reflected by
    /// `readonly()`.
    #[test]
    fn config() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(0x42),
            capacity_high: ReadOnly::new(0x02),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RO.bits(),
            state: state.clone(),
        };
        let blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        assert_eq!(blk.capacity(), 0x02_0000_0042);
        assert_eq!(blk.readonly(), true);
    }

    /// A blocking `read_blocks` sends an `In` request and receives the device's data.
    #[test]
    fn read() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a read request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::In,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );

                    // The device's reply: one sector of payload followed by the status byte.
                    let mut response = vec![0; SECTOR_SIZE];
                    response[0..9].copy_from_slice(b"Test data");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Read a block from the device.
        let mut buffer = [0; 512];
        blk.read_blocks(42, &mut buffer).unwrap();
        assert_eq!(&buffer[0..9], b"Test data");

        handle.join().unwrap();
    }

    /// A blocking `write_blocks` sends an `Out` request followed by the payload bytes;
    /// `flush` is a no-op when the FLUSH feature was not negotiated.
    #[test]
    fn write() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a write request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    // The request header comes first, then the sector payload.
                    assert_eq!(
                        &request[0..size_of::<BlkReq>()],
                        BlkReq {
                            type_: ReqType::Out,
                            reserved: 0,
                            sector: 42
                        }
                        .as_bytes()
                    );
                    let data = &request[size_of::<BlkReq>()..];
                    assert_eq!(data.len(), SECTOR_SIZE);
                    assert_eq!(&data[0..9], b"Test data");

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Write a block to the device.
        let mut buffer = [0; 512];
        buffer[0..9].copy_from_slice(b"Test data");
        blk.write_blocks(42, &mut buffer).unwrap();

        // Request to flush should be ignored as the device doesn't support it.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    /// With the FLUSH feature negotiated, `flush` sends a real Flush request.
    #[test]
    fn flush() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: (BlkFeature::RING_INDIRECT_DESC | BlkFeature::FLUSH).bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a flush request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::Flush,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    let mut response = Vec::new();
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        // Request to flush.
        blk.flush().unwrap();

        handle.join().unwrap();
    }

    /// `device_id` sends a GetId request and trims the returned string at the first NUL.
    #[test]
    fn device_id() {
        let config_space = BlkConfig {
            capacity_low: ReadOnly::new(66),
            capacity_high: ReadOnly::new(0),
            size_max: ReadOnly::new(0),
            seg_max: ReadOnly::new(0),
            cylinders: ReadOnly::new(0),
            heads: ReadOnly::new(0),
            sectors: ReadOnly::new(0),
            blk_size: ReadOnly::new(0),
            physical_block_exp: ReadOnly::new(0),
            alignment_offset: ReadOnly::new(0),
            min_io_size: ReadOnly::new(0),
            opt_io_size: ReadOnly::new(0),
        };
        let state = Arc::new(Mutex::new(State::new(
            vec![QueueStatus::default()],
            config_space,
        )));
        let transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: QUEUE_SIZE.into(),
            device_features: BlkFeature::RING_INDIRECT_DESC.bits(),
            state: state.clone(),
        };
        let mut blk = VirtIOBlk::<FakeHal, FakeTransport<BlkConfig>>::new(transport).unwrap();

        // Start a thread to simulate the device waiting for a device ID request.
        let handle = thread::spawn(move || {
            println!("Device waiting for a request.");
            State::wait_until_queue_notified(&state, QUEUE);
            println!("Transmit queue was notified.");

            assert!(state
                .lock()
                .unwrap()
                .read_write_queue::<{ QUEUE_SIZE as usize }>(QUEUE, |request| {
                    assert_eq!(
                        request,
                        BlkReq {
                            type_: ReqType::GetId,
                            reserved: 0,
                            sector: 0,
                        }
                        .as_bytes()
                    );

                    // 20-byte ID buffer, NUL-padded, followed by the status byte.
                    let mut response = Vec::new();
                    response.extend_from_slice(b"device_id\0\0\0\0\0\0\0\0\0\0\0");
                    response.extend_from_slice(
                        BlkResp {
                            status: RespStatus::OK,
                        }
                        .as_bytes(),
                    );

                    response
                }));
        });

        let mut id = [0; 20];
        let length = blk.device_id(&mut id).unwrap();
        assert_eq!(&id[0..length], b"device_id");

        handle.join().unwrap();
    }
}
875