use super::{Config, EthernetAddress, Features, VirtioNetHdr};
use super::{MIN_BUFFER_LEN, NET_HDR_SIZE, QUEUE_RECEIVE, QUEUE_TRANSMIT, SUPPORTED_FEATURES};
use crate::config::read_config;
use crate::hal::Hal;
use crate::queue::VirtQueue;
use crate::transport::Transport;
use crate::{Error, Result};
use log::{debug, info, warn};
use zerocopy::IntoBytes;

/// Raw driver for a VirtIO network device.
///
/// This is a raw version of the VirtIONet driver. It provides non-blocking
/// methods for transmitting and receiving raw slices, without buffer
/// management. For higher-level functionality such as receive buffer backing,
/// see [`VirtIONet`].
///
/// [`VirtIONet`]: super::VirtIONet
pub struct VirtIONetRaw<H: Hal, T: Transport, const QUEUE_SIZE: usize> {
    transport: T,
    mac: EthernetAddress,
    recv_queue: VirtQueue<H, QUEUE_SIZE>,
    send_queue: VirtQueue<H, QUEUE_SIZE>,
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> VirtIONetRaw<H, T, QUEUE_SIZE> {
    /// Create a new VirtIO-Net driver.
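    ///
    /// A minimal construction sketch (`HalImpl` stands for a user-provided
    /// [`Hal`] implementation and `transport` for an already-probed
    /// [`Transport`]; both are assumptions for illustration, as is the queue
    /// size of 16):
    ///
    /// ```ignore
    /// let mut net = VirtIONetRaw::<HalImpl, _, 16>::new(transport)?;
    /// info!("MAC address: {:02x?}", net.mac_address());
    /// ```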
    pub fn new(mut transport: T) -> Result<Self> {
        let negotiated_features = transport.begin_init(SUPPORTED_FEATURES);
        info!("negotiated_features {:?}", negotiated_features);

        // Read configuration space.
        let mac = transport.read_consistent(|| read_config!(transport, Config, mac))?;
        let status = read_config!(transport, Config, status)?;
        debug!("Got MAC={:02x?}, status={:?}", mac, status);

        let send_queue = VirtQueue::new(
            &mut transport,
            QUEUE_TRANSMIT,
            negotiated_features.contains(Features::RING_INDIRECT_DESC),
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;
        let recv_queue = VirtQueue::new(
            &mut transport,
            QUEUE_RECEIVE,
            negotiated_features.contains(Features::RING_INDIRECT_DESC),
            negotiated_features.contains(Features::RING_EVENT_IDX),
        )?;

        transport.finish_init();

        Ok(VirtIONetRaw {
            transport,
            mac,
            recv_queue,
            send_queue,
        })
    }

    /// Acknowledge interrupt.
    pub fn ack_interrupt(&mut self) -> bool {
        self.transport.ack_interrupt()
    }

    /// Disable interrupts.
    pub fn disable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(false);
        self.recv_queue.set_dev_notify(false);
    }

    /// Enable interrupts.
    pub fn enable_interrupts(&mut self) {
        self.send_queue.set_dev_notify(true);
        self.recv_queue.set_dev_notify(true);
    }

    /// Get MAC address.
    pub fn mac_address(&self) -> EthernetAddress {
        self.mac
    }

    /// Whether a packet can currently be sent, i.e. whether the transmit
    /// queue has at least two free descriptors (one for the header and one
    /// for the payload).
    pub fn can_send(&self) -> bool {
        self.send_queue.available_desc() >= 2
    }

    /// Whether the length of the receive buffer is valid.
    fn check_rx_buf_len(rx_buf: &[u8]) -> Result<()> {
        if rx_buf.len() < MIN_BUFFER_LEN {
            warn!("Receive buffer len {} is too small", rx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Whether the length of the transmit buffer is valid.
    fn check_tx_buf_len(tx_buf: &[u8]) -> Result<()> {
        if tx_buf.len() < NET_HDR_SIZE {
            warn!("Transmit buffer len {} is too small", tx_buf.len());
            Err(Error::InvalidParam)
        } else {
            Ok(())
        }
    }

    /// Fill the header of the `buffer` with [`VirtioNetHdr`].
    ///
    /// If the `buffer` is not large enough, it returns [`Error::InvalidParam`].
    pub fn fill_buffer_header(&self, buffer: &mut [u8]) -> Result<usize> {
        if buffer.len() < NET_HDR_SIZE {
            return Err(Error::InvalidParam);
        }
        let header = VirtioNetHdr::default();
        buffer[..NET_HDR_SIZE].copy_from_slice(header.as_bytes());
        Ok(NET_HDR_SIZE)
    }

    /// Submits a request to transmit a buffer immediately without waiting for
    /// the transmission to complete.
    ///
    /// It will submit a request to the VirtIO net device and return a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller needs to fill the `tx_buf` with a header by calling
    /// [`fill_buffer_header`] before transmission. Then it calls [`poll_transmit`]
    /// with the returned token to check whether the device has finished handling
    /// the request. Once it has, the caller must call [`transmit_complete`] with
    /// the same buffer before reading the result (transmitted length).
    ///
    /// # Safety
    ///
    /// `tx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`fill_buffer_header`]: Self::fill_buffer_header
    /// [`poll_transmit`]: Self::poll_transmit
    /// [`transmit_complete`]: Self::transmit_complete
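    ///
    /// # Example
    ///
    /// A minimal sketch of the polling transmit flow (assuming `net` is an
    /// initialised driver instance with a single outstanding request, and
    /// that busy-waiting is acceptable):
    ///
    /// ```ignore
    /// let mut tx_buf = [0u8; 2048];
    /// let header_len = net.fill_buffer_header(&mut tx_buf)?;
    /// let packet_len = 64; // hypothetical payload length, written by the caller
    /// // ... fill tx_buf[header_len..header_len + packet_len] with the packet ...
    /// let token = unsafe { net.transmit_begin(&tx_buf[..header_len + packet_len])? };
    /// // tx_buf is now borrowed by the device; don't touch it until completion.
    /// while net.poll_transmit().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// let sent = unsafe { net.transmit_complete(token, &tx_buf[..header_len + packet_len])? };
    /// ```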
    pub unsafe fn transmit_begin(&mut self, tx_buf: &[u8]) -> Result<u16> {
        Self::check_tx_buf_len(tx_buf)?;
        let token = self.send_queue.add(&[tx_buf], &mut [])?;
        if self.send_queue.should_notify() {
            self.transport.notify(QUEUE_TRANSMIT);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed transmission request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_transmit(&mut self) -> Option<u16> {
        self.send_queue.peek_used()
    }

    /// Completes a transmission operation which was started by [`transmit_begin`].
    /// Returns the number of bytes transmitted.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`transmit_begin`] when it returned the token.
    ///
    /// [`transmit_begin`]: Self::transmit_begin
    pub unsafe fn transmit_complete(&mut self, token: u16, tx_buf: &[u8]) -> Result<usize> {
        let len = self.send_queue.pop_used(token, &[tx_buf], &mut [])?;
        Ok(len as usize)
    }

    /// Submits a request to receive a buffer immediately without waiting for
    /// the reception to complete.
    ///
    /// It will submit a request to the VirtIO net device and return a token
    /// identifying the position of the first descriptor in the chain. If there
    /// are not enough descriptors to allocate, then it returns
    /// [`Error::QueueFull`].
    ///
    /// The caller can then call [`poll_receive`] with the returned token to
    /// check whether the device has finished handling the request. Once it has,
    /// the caller must call [`receive_complete`] with the same buffer before
    /// reading the response.
    ///
    /// # Safety
    ///
    /// `rx_buf` is still borrowed by the underlying VirtIO net device even after
    /// this method returns. Thus, it is the caller's responsibility to guarantee
    /// that it is not accessed before the request is completed in order to
    /// avoid data races.
    ///
    /// [`poll_receive`]: Self::poll_receive
    /// [`receive_complete`]: Self::receive_complete
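    ///
    /// # Example
    ///
    /// A minimal sketch of the polling receive flow (assuming `net` is an
    /// initialised driver instance with a single outstanding request, and
    /// that busy-waiting is acceptable):
    ///
    /// ```ignore
    /// let mut rx_buf = [0u8; 2048];
    /// let token = unsafe { net.receive_begin(&mut rx_buf)? };
    /// // rx_buf is now borrowed by the device; don't touch it until completion.
    /// while net.poll_receive().is_none() {
    ///     core::hint::spin_loop();
    /// }
    /// let (hdr_len, pkt_len) = unsafe { net.receive_complete(token, &mut rx_buf)? };
    /// let packet = &rx_buf[hdr_len..hdr_len + pkt_len];
    /// ```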
    pub unsafe fn receive_begin(&mut self, rx_buf: &mut [u8]) -> Result<u16> {
        Self::check_rx_buf_len(rx_buf)?;
        let token = self.recv_queue.add(&[], &mut [rx_buf])?;
        if self.recv_queue.should_notify() {
            self.transport.notify(QUEUE_RECEIVE);
        }
        Ok(token)
    }

    /// Fetches the token of the next completed reception request from the
    /// used ring and returns it, without removing it from the used ring. If
    /// there are no pending completed requests it returns [`None`].
    pub fn poll_receive(&self) -> Option<u16> {
        self.recv_queue.peek_used()
    }

    /// Completes a reception operation which was started by [`receive_begin`].
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
    ///
    /// # Safety
    ///
    /// The same buffer must be passed in again as was passed to
    /// [`receive_begin`] when it returned the token.
    ///
    /// [`receive_begin`]: Self::receive_begin
    pub unsafe fn receive_complete(
        &mut self,
        token: u16,
        rx_buf: &mut [u8],
    ) -> Result<(usize, usize)> {
        let len = self.recv_queue.pop_used(token, &[], &mut [rx_buf])? as usize;
        let packet_len = len.checked_sub(NET_HDR_SIZE).ok_or(Error::IoError)?;
        Ok((NET_HDR_SIZE, packet_len))
    }

    /// Sends a packet to the network, and blocks until the request has
    /// completed.
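    ///
    /// Unlike [`transmit_begin`], this method prepends the [`VirtioNetHdr`]
    /// itself, so `tx_buf` should contain only the packet body. A minimal
    /// usage sketch (assuming `net` is an initialised driver instance and
    /// `frame` an Ethernet frame built by the caller):
    ///
    /// ```ignore
    /// net.send(&frame)?;
    /// ```
    ///
    /// [`transmit_begin`]: Self::transmit_begin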
    pub fn send(&mut self, tx_buf: &[u8]) -> Result {
        let header = VirtioNetHdr::default();
        if tx_buf.is_empty() {
            // Special case sending an empty packet, to avoid adding an empty buffer to the
            // virtqueue.
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes()],
                &mut [],
                &mut self.transport,
            )?;
        } else {
            self.send_queue.add_notify_wait_pop(
                &[header.as_bytes(), tx_buf],
                &mut [],
                &mut self.transport,
            )?;
        }
        Ok(())
    }

    /// Blocks and waits for a packet to be received.
    ///
    /// After completion, the `rx_buf` will contain a header followed by the
    /// received packet. It returns the length of the header and the length of
    /// the packet.
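    ///
    /// A minimal usage sketch (assuming `net` is an initialised driver
    /// instance; 2048 bytes is an arbitrary buffer size of at least
    /// `MIN_BUFFER_LEN`):
    ///
    /// ```ignore
    /// let mut rx_buf = [0u8; 2048];
    /// let (hdr_len, pkt_len) = net.receive_wait(&mut rx_buf)?;
    /// let packet = &rx_buf[hdr_len..hdr_len + pkt_len];
    /// ```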
    pub fn receive_wait(&mut self, rx_buf: &mut [u8]) -> Result<(usize, usize)> {
        let token = unsafe { self.receive_begin(rx_buf)? };
        while self.poll_receive().is_none() {
            core::hint::spin_loop();
        }
        unsafe { self.receive_complete(token, rx_buf) }
    }
}

impl<H: Hal, T: Transport, const QUEUE_SIZE: usize> Drop for VirtIONetRaw<H, T, QUEUE_SIZE> {
    fn drop(&mut self) {
        // Clear any pointers pointing to DMA regions, so the device doesn't try to access them
        // after they have been freed.
        self.transport.queue_unset(QUEUE_RECEIVE);
        self.transport.queue_unset(QUEUE_TRANSMIT);
    }
}