//! A fake implementation of `Transport` for unit tests.

use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    queue::{fake_read_write_queue, Descriptor},
    Error, PhysAddr,
};
use alloc::{sync::Arc, vec::Vec};
use core::{
    fmt::{self, Debug, Formatter},
    sync::atomic::{AtomicBool, Ordering},
    time::Duration,
};
use std::{sync::Mutex, thread};
use zerocopy::{FromBytes, Immutable, IntoBytes};

/// A fake implementation of [`Transport`] for unit tests.
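///
/// # Example
///
/// A minimal construction sketch (illustrative; the device type, queue count
/// and `u32` config space are arbitrary choices for this example):
///
/// ```
/// let transport = FakeTransport {
///     device_type: DeviceType::Block,
///     max_queue_size: 16,
///     device_features: 0,
///     state: Arc::new(Mutex::new(State::new(
///         Vec::from([QueueStatus::default()]),
///         0u32,
///     ))),
/// };
/// ```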
#[derive(Debug)]
pub struct FakeTransport<C> {
    /// The type of device which the transport should claim to be for.
    pub device_type: DeviceType,
    /// The maximum queue size supported by the transport.
    pub max_queue_size: u32,
    /// The device features which should be reported by the transport.
    pub device_features: u64,
    /// The mutable state of the transport.
    pub state: Arc<Mutex<State<C>>>,
}

impl<C: FromBytes + Immutable + IntoBytes> Transport for FakeTransport<C> {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        self.device_features
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        self.state.lock().unwrap().driver_features = driver_features;
    }

    fn max_queue_size(&mut self, _queue: u16) -> u32 {
        self.max_queue_size
    }

    fn notify(&mut self, queue: u16) {
        self.state.lock().unwrap().queues[queue as usize]
            .notified
            .store(true, Ordering::SeqCst);
    }

    fn get_status(&self) -> DeviceStatus {
        self.state.lock().unwrap().status
    }

    fn set_status(&mut self, status: DeviceStatus) {
        self.state.lock().unwrap().status = status;
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        self.state.lock().unwrap().guest_page_size = guest_page_size;
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = size;
        state.queues[queue as usize].descriptors = descriptors;
        state.queues[queue as usize].driver_area = driver_area;
        state.queues[queue as usize].device_area = device_area;
    }

    fn queue_unset(&mut self, queue: u16) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = 0;
        state.queues[queue as usize].descriptors = 0;
        state.queues[queue as usize].driver_area = 0;
        state.queues[queue as usize].device_area = 0;
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        self.state.lock().unwrap().queues[queue as usize].descriptors != 0
    }

    fn ack_interrupt(&mut self) -> bool {
        let mut state = self.state.lock().unwrap();
        let pending = state.interrupt_pending;
        if pending {
            state.interrupt_pending = false;
        }
        pending
    }

    fn read_config_generation(&self) -> u32 {
        self.state.lock().unwrap().config_generation
    }

    fn read_config_space<T: FromBytes>(&self, offset: usize) -> Result<T, Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset % align_of::<T>() == 0);

        if size_of::<C>() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            let state = self.state.lock().unwrap();
            let bytes = &state.config_space.as_bytes()[offset..offset + size_of::<T>()];
            Ok(T::read_from_bytes(bytes).unwrap())
        }
    }

    fn write_config_space<T: Immutable + IntoBytes>(
        &mut self,
        offset: usize,
        value: T,
    ) -> Result<(), Error> {
        assert!(
            align_of::<T>() <= 4,
            "Driver expected config space alignment of {} bytes, but VirtIO only guarantees 4 byte alignment.",
            align_of::<T>()
        );
        assert!(offset % align_of::<T>() == 0);

        if size_of::<C>() < offset + size_of::<T>() {
            Err(Error::ConfigSpaceTooSmall)
        } else {
            let mut state = self.state.lock().unwrap();
            let bytes = &mut state.config_space.as_mut_bytes()[offset..offset + size_of::<T>()];
            value.write_to(bytes).unwrap();
            Ok(())
        }
    }
}

/// The mutable state of a fake transport.
pub struct State<C> {
    /// The status of the fake device.
    pub status: DeviceStatus,
    /// The features which the driver says it supports.
    pub driver_features: u64,
    /// The guest page size set by the driver.
    pub guest_page_size: u32,
    /// Whether the transport has an interrupt pending.
    pub interrupt_pending: bool,
    /// The state of the transport's queues.
    pub queues: Vec<QueueStatus>,
    /// The config generation which the transport should report.
    pub config_generation: u32,
    /// The state of the transport's VirtIO configuration space.
    pub config_space: C,
}

impl<C> Debug for State<C> {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        f.debug_struct("State")
            .field("status", &self.status)
            .field("driver_features", &self.driver_features)
            .field("guest_page_size", &self.guest_page_size)
            .field("interrupt_pending", &self.interrupt_pending)
            .field("queues", &self.queues)
            .field("config_generation", &self.config_generation)
            .field("config_space", &"...")
            .finish()
    }
}

impl<C> State<C> {
    /// Creates a state for a fake transport, with the given queues and VirtIO configuration space.
    pub const fn new(queues: Vec<QueueStatus>, config_space: C) -> Self {
        Self {
            status: DeviceStatus::empty(),
            driver_features: 0,
            guest_page_size: 0,
            interrupt_pending: false,
            queues,
            config_generation: 0,
            config_space,
        }
    }

    /// Simulates the device writing to the given queue.
    ///
    /// The fake device always uses descriptors in order.
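    ///
    /// A usage sketch (illustrative): once the driver under test has made a
    /// device-writable buffer available in queue 0 of size 4, a test can make
    /// the fake device fill it in:
    ///
    /// ```
    /// state.lock().unwrap().write_to_queue::<4>(0, b"data");
    /// ```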
    pub fn write_to_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16, data: &[u8]) {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        assert!(fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                assert_eq!(input, Vec::new());
                data.to_owned()
            },
        ));
    }

    /// Simulates the device reading from the given queue.
    ///
    /// Returns the data which the driver made available for the device to read.
    ///
    /// The fake device always uses descriptors in order.
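    ///
    /// A usage sketch (illustrative; assumes the driver has made a buffer
    /// available in queue 0, whose size is 4):
    ///
    /// ```
    /// let request: Vec<u8> = state.lock().unwrap().read_from_queue::<4>(0);
    /// ```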
    pub fn read_from_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16) -> Vec<u8> {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);

        let mut ret = None;

        // Read data from the queue but don't write any response.
        assert!(fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                ret = Some(input);
                Vec::new()
            },
        ));

        ret.unwrap()
    }

    /// Simulates the device reading data from the given queue and then writing a response back.
    ///
    /// The fake device always uses descriptors in order.
    ///
    /// Returns true if a descriptor chain was available and processed, or false if no descriptors
    /// were available.
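    ///
    /// A usage sketch (illustrative; `process` is a hypothetical helper which
    /// builds the device's response from the driver's request):
    ///
    /// ```
    /// let handled = state
    ///     .lock()
    ///     .unwrap()
    ///     .read_write_queue::<4>(0, |request| process(&request));
    /// ```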
    pub fn read_write_queue<const QUEUE_SIZE: usize>(
        &mut self,
        queue_index: u16,
        handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
    ) -> bool {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            handler,
        )
    }

    /// Waits until the given queue is notified.
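    ///
    /// A sketch of the typical pattern (illustrative): a test spawns a fake
    /// device thread which blocks here until the driver kicks the queue, then
    /// services it.
    ///
    /// ```
    /// let state_for_device = state.clone();
    /// thread::spawn(move || {
    ///     State::wait_until_queue_notified(&state_for_device, 0);
    ///     state_for_device
    ///         .lock()
    ///         .unwrap()
    ///         .write_to_queue::<4>(0, b"data");
    /// });
    /// ```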
    pub fn wait_until_queue_notified(state: &Mutex<Self>, queue_index: u16) {
        while !Self::poll_queue_notified(state, queue_index) {
            thread::sleep(Duration::from_millis(10));
        }
    }

    /// Checks if the given queue has been notified.
    ///
    /// If it has, returns true and resets the status so this will return false until it is
    /// notified again.
    pub fn poll_queue_notified(state: &Mutex<Self>, queue_index: u16) -> bool {
        state.lock().unwrap().queues[usize::from(queue_index)]
            .notified
            .swap(false, Ordering::SeqCst)
    }
}

/// The status of a fake virtqueue.
#[derive(Debug, Default)]
pub struct QueueStatus {
    /// The size of the fake virtqueue.
    pub size: u32,
    /// The physical address set for the queue's descriptors.
    pub descriptors: PhysAddr,
    /// The physical address set for the queue's driver area.
    pub driver_area: PhysAddr,
    /// The physical address set for the queue's device area.
    pub device_area: PhysAddr,
    /// Whether the queue has been notified by the driver since last we checked.
    pub notified: AtomicBool,
}
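
// A minimal self-check of the fake transport (illustrative, not exhaustive):
// it exercises the status and config space handling through the `Transport`
// trait, assuming a `u32` config space. The device type and sizes are
// arbitrary choices for this example.
#[cfg(test)]
mod fake_transport_tests {
    use super::*;

    #[test]
    fn status_and_config_space_roundtrip() {
        let state = Arc::new(Mutex::new(State::new(
            Vec::from([QueueStatus::default()]),
            0x1234_5678u32,
        )));
        let mut transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: 16,
            device_features: 0,
            state: state.clone(),
        };

        // Status written through the transport is visible in the shared state.
        transport.set_status(DeviceStatus::ACKNOWLEDGE);
        assert_eq!(state.lock().unwrap().status, DeviceStatus::ACKNOWLEDGE);

        // Config space reads and writes round-trip through zerocopy.
        assert_eq!(transport.read_config_space::<u32>(0).unwrap(), 0x1234_5678);
        transport.write_config_space(0, 0x9abc_def0u32).unwrap();
        assert_eq!(transport.read_config_space::<u32>(0).unwrap(), 0x9abc_def0);

        // Reads past the end of the config space are rejected.
        assert!(transport.read_config_space::<u32>(4).is_err());
    }
}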