use super::{DeviceStatus, DeviceType, Transport};
use crate::{
    queue::{fake_read_write_queue, Descriptor},
    PhysAddr, Result,
};
use alloc::{sync::Arc, vec::Vec};
use core::{any::TypeId, ptr::NonNull};
use std::sync::Mutex;

/// A fake implementation of [`Transport`] for unit tests.
#[derive(Debug)]
pub struct FakeTransport<C: 'static> {
    pub device_type: DeviceType,
    pub max_queue_size: u32,
    pub device_features: u64,
    pub config_space: NonNull<C>,
    pub state: Arc<Mutex<State>>,
}

impl<C> Transport for FakeTransport<C> {
    fn device_type(&self) -> DeviceType {
        self.device_type
    }

    fn read_device_features(&mut self) -> u64 {
        self.device_features
    }

    fn write_driver_features(&mut self, driver_features: u64) {
        self.state.lock().unwrap().driver_features = driver_features;
    }

    fn max_queue_size(&self) -> u32 {
        self.max_queue_size
    }

    fn notify(&mut self, queue: u16) {
        self.state.lock().unwrap().queues[queue as usize].notified = true;
    }

    fn get_status(&self) -> DeviceStatus {
        self.state.lock().unwrap().status
    }

    fn set_status(&mut self, status: DeviceStatus) {
        self.state.lock().unwrap().status = status;
    }

    fn set_guest_page_size(&mut self, guest_page_size: u32) {
        self.state.lock().unwrap().guest_page_size = guest_page_size;
    }

    fn requires_legacy_layout(&self) -> bool {
        false
    }

    fn queue_set(
        &mut self,
        queue: u16,
        size: u32,
        descriptors: PhysAddr,
        driver_area: PhysAddr,
        device_area: PhysAddr,
    ) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = size;
        state.queues[queue as usize].descriptors = descriptors;
        state.queues[queue as usize].driver_area = driver_area;
        state.queues[queue as usize].device_area = device_area;
    }

    fn queue_unset(&mut self, queue: u16) {
        let mut state = self.state.lock().unwrap();
        state.queues[queue as usize].size = 0;
        state.queues[queue as usize].descriptors = 0;
        state.queues[queue as usize].driver_area = 0;
        state.queues[queue as usize].device_area = 0;
    }

    fn queue_used(&mut self, queue: u16) -> bool {
        self.state.lock().unwrap().queues[queue as usize].descriptors != 0
    }

    fn ack_interrupt(&mut self) -> bool {
        let mut state = self.state.lock().unwrap();
        let pending = state.interrupt_pending;
        if pending {
            state.interrupt_pending = false;
        }
        pending
    }

    fn config_space<T: 'static>(&self) -> Result<NonNull<T>> {
        if TypeId::of::<T>() == TypeId::of::<C>() {
            Ok(self.config_space.cast())
        } else {
            panic!("Unexpected config space type.");
        }
    }
}

#[derive(Debug, Default)]
pub struct State {
    pub status: DeviceStatus,
    pub driver_features: u64,
    pub guest_page_size: u32,
    pub interrupt_pending: bool,
    pub queues: Vec<QueueStatus>,
}

impl State {
    /// Simulates the device writing to the given queue.
    ///
    /// The fake device always uses descriptors in order.
    pub fn write_to_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16, data: &[u8]) {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                assert_eq!(input, Vec::new());
                data.to_owned()
            },
        );
    }

    /// Simulates the device reading from the given queue.
    ///
    /// Returns the data that was read from the queue.
    ///
    /// The fake device always uses descriptors in order.
    pub fn read_from_queue<const QUEUE_SIZE: usize>(&mut self, queue_index: u16) -> Vec<u8> {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);

        let mut ret = None;

        // Read data from the queue but don't write any response.
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            |input| {
                ret = Some(input);
                Vec::new()
            },
        );

        ret.unwrap()
    }

    /// Simulates the device reading data from the given queue and then writing a response back.
    ///
    /// The fake device always uses descriptors in order.
    pub fn read_write_queue<const QUEUE_SIZE: usize>(
        &mut self,
        queue_index: u16,
        handler: impl FnOnce(Vec<u8>) -> Vec<u8>,
    ) {
        let queue = &self.queues[queue_index as usize];
        assert_ne!(queue.descriptors, 0);
        fake_read_write_queue(
            queue.descriptors as *const [Descriptor; QUEUE_SIZE],
            queue.driver_area as *const u8,
            queue.device_area as *mut u8,
            handler,
        )
    }
}

#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct QueueStatus {
    pub size: u32,
    pub descriptors: PhysAddr,
    pub driver_area: PhysAddr,
    pub device_area: PhysAddr,
    pub notified: bool,
}
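
// The sketch below is not part of the original module; it is a minimal example of how
// `FakeTransport` and its shared `State` can be exercised through the `Transport` trait
// in a unit test. It assumes the crate's `DeviceType::Block` variant and the
// `DeviceStatus` flags `ACKNOWLEDGE` and `DRIVER`; the `FakeConfig` struct and the test
// itself are hypothetical.
#[cfg(test)]
mod example_usage {
    use super::*;

    /// A hypothetical config-space layout standing in for a real device's config struct.
    #[repr(C)]
    struct FakeConfig {
        capacity: u64,
    }

    #[test]
    fn driver_writes_are_visible_in_shared_state() {
        // Shared state with two (empty) queues so that `notify` has something to mark.
        let mut queues = Vec::new();
        queues.resize_with(2, QueueStatus::default);
        let state = Arc::new(Mutex::new(State {
            queues,
            ..Default::default()
        }));

        let mut config = FakeConfig { capacity: 8 };
        let mut transport = FakeTransport {
            device_type: DeviceType::Block,
            max_queue_size: 16,
            device_features: 0,
            config_space: NonNull::from(&mut config),
            state: state.clone(),
        };

        // Reads go straight to the transport's own fields.
        assert!(matches!(transport.device_type(), DeviceType::Block));
        assert_eq!(transport.max_queue_size(), 16);
        assert!(transport.config_space::<FakeConfig>().is_ok());

        // Driver-side calls go through the `Transport` trait...
        transport.write_driver_features(0x3);
        transport.set_status(DeviceStatus::ACKNOWLEDGE | DeviceStatus::DRIVER);
        transport.notify(1);

        // ...and the fake "device" observes them via the shared `State`.
        let state = state.lock().unwrap();
        assert_eq!(state.driver_features, 0x3);
        assert!(state.queues[1].notified);
    }
}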