// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::io;
use std::io::Read;
use std::io::Write;
use std::result;
use std::sync::Arc;
use std::sync::MutexGuard;

use base::error;
use base::named_pipes::OverlappedWrapper;
use base::warn;
use base::Event;
use base::ReadNotifier;
use base::WaitContext;
use libc::EEXIST;
use net_util::TapT;
use sync::Mutex;
use virtio_sys::virtio_net;
use vm_memory::GuestMemory;

use super::super::super::base_features;
use super::super::super::net::Net;
use super::super::super::net::NetError;
use super::super::super::net::Token;
use super::super::super::net::Worker;
use super::super::super::net::MAX_BUFFER_SIZE;
use super::super::super::Interrupt;
use super::super::super::ProtectionType;
use super::super::super::Queue;
use super::super::super::Reader;

// This file should not be included at the virtio mod level if slirp is not included. In case it
// is, throw a user-friendly message.
#[cfg(not(feature = "slirp"))]
compile_error!("Net device without slirp not supported on windows");

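/// Validates and configures the tap device for the requested number of queue pairs. This is a
/// no-op for the slirp backend on Windows.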
pub fn validate_and_configure_tap<T: TapT>(_tap: &T, _vq_pairs: u16) -> Result<(), NetError> {
    // No-op for slirp on windows
    Ok(())
}

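/// Converts virtio feature bits into tap offload flags. Always returns 0 because slirp does not
/// support offloads.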
pub fn virtio_features_to_tap_offload(_features: u64) -> u32 {
    // slirp does not support offloads
    0
}

// Copies a single frame from `rx_buf` into the guest. Returns true if a buffer was used, and
// false if the frame must be deferred until a buffer is made available by the driver.
fn rx_single_frame(rx_queue: &mut Queue, rx_buf: &mut [u8], rx_count: usize) -> bool {
    let mut desc_chain = match rx_queue.pop() {
        Some(desc) => desc,
        None => return false,
    };

    match desc_chain.writer.write_all(&rx_buf[0..rx_count]) {
        Ok(()) => (),
        Err(ref e) if e.kind() == io::ErrorKind::WriteZero => {
            warn!(
                "net: rx: buffer is too small to hold frame of size {}",
                rx_count
            );
        }
        Err(e) => {
            warn!("net: rx: failed to write slice: {}", e);
        }
    };

    let bytes_written = desc_chain.writer.bytes_written() as u32;

    rx_queue.add_used(desc_chain, bytes_written);

    true
}

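/// Copies frames received from the tap (slirp) into the guest's rx queue until either no more
/// frames are available or the guest has no free rx buffers (in which case the frame is deferred
/// via `deferred_rx`). Returns true if the rx queue still needs a used-queue interrupt.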
pub fn process_rx<T: TapT>(
    interrupt: &Interrupt,
    rx_queue: &mut Queue,
    tap: &mut T,
    rx_buf: &mut [u8],
    deferred_rx: &mut bool,
    rx_count: &mut usize,
    overlapped_wrapper: &mut OverlappedWrapper,
) -> bool {
    let mut needs_interrupt = false;
    let mut first_frame = true;

    // Read as many frames as possible.
    loop {
        let res = if *deferred_rx {
            // The existing buffer still needs to be sent to the rx queue.
            Ok(*rx_count)
        } else {
            tap.try_read_result(overlapped_wrapper)
        };
        match res {
            Ok(count) => {
                *rx_count = count;
                if !rx_single_frame(rx_queue, rx_buf, *rx_count) {
                    *deferred_rx = true;
                    break;
                } else if first_frame {
                    interrupt.signal_used_queue(rx_queue.vector());
                    first_frame = false;
                } else {
                    needs_interrupt = true;
                }

                // SAFETY: safe because rx_buf & overlapped_wrapper live until
                // the overlapped operation completes and are not used in any
                // other operations until that time.
                match unsafe { tap.read_overlapped(rx_buf, overlapped_wrapper) } {
                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => {
                        warn!("net: rx: read_overlapped failed: {}", e);
                        break;
                    }
                    Err(e) => {
                        panic!("read_overlapped failed: {}", e);
                    }
                    _ => {}
                }

                // We were able to dispatch a frame to the guest, so we can resume normal RX
                // service.
                *deferred_rx = false;
            }
            Err(e) => {
                // `try_read_result()` shouldn't return any error other than
                // `ERROR_IO_INCOMPLETE`. If it does, we need to retry the
                // overlapped operation.
                if e.kind() != std::io::ErrorKind::WouldBlock {
                    warn!("net: rx: failed to read tap: {}", e);
                    // SAFETY: safe because rx_buf & overlapped_wrapper live until
                    // the overlapped operation completes and are not used in any
                    // other operations until that time.
                    match unsafe { tap.read_overlapped(rx_buf, overlapped_wrapper) } {
                        Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => {
                            warn!("net: rx: read_overlapped failed: {}", e);
                            break;
                        }
                        Err(e) => {
                            panic!("read_overlapped failed: {}", e);
                        }
                        _ => {}
                    }
                }
                break;
            }
        }
    }

    needs_interrupt
}

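/// Drains the tx queue, copying each descriptor chain into a contiguous buffer and writing it to
/// the tap (slirp) device, then triggers a tx queue interrupt.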
pub fn process_tx<T: TapT>(interrupt: &Interrupt, tx_queue: &mut Queue, tap: &mut T) {
    // Reads from `r` into `buf` until `buf` is full or `r` has no more data, whichever comes
    // first.
    fn read_to_end(r: &mut Reader, buf: &mut [u8]) -> io::Result<usize> {
        let mut count = 0;
        while count < buf.len() {
            match r.read(&mut buf[count..]) {
                Ok(0) => break,
                Ok(n) => count += n,
                Err(e) => return Err(e),
            }
        }

        Ok(count)
    }

    while let Some(mut desc_chain) = tx_queue.pop() {
        let mut frame = [0u8; MAX_BUFFER_SIZE];
        match read_to_end(&mut desc_chain.reader, &mut frame[..]) {
            Ok(len) => {
                // We need to copy the frame into a contiguous buffer before writing it to
                // slirp because the tap requires the frame to be completed in a single write.
                if let Err(err) = tap.write_all(&frame[..len]) {
                    error!("net: tx: failed to write to tap: {}", err);
                }
            }
            Err(e) => error!("net: tx: failed to read frame into buffer: {}", e),
        }

        tx_queue.add_used(desc_chain, 0);
    }

    tx_queue.trigger_interrupt(interrupt);
}

impl<T> Worker<T>
where
    T: TapT + ReadNotifier,
{
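    /// Runs `process_rx` using this worker's rx queue, tap, and deferred rx state. Returns true
    /// if the rx queue still needs a used-queue interrupt.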
    pub(super) fn process_rx_slirp(&mut self) -> bool {
        process_rx(
            &self.interrupt,
            &mut self.rx_queue,
            &mut self.tap,
            &mut self.rx_buf,
            &mut self.deferred_rx,
            &mut self.rx_count,
            &mut self.overlapped_wrapper,
        )
    }

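    /// Handles an rx event on the tap: delivers any deferred frame first (removing the tap from
    /// the wait context if the guest still has no rx buffers), then processes newly received
    /// frames and signals the rx queue if needed.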
    pub(in crate::virtio) fn handle_rx_token(
        &mut self,
        wait_ctx: &WaitContext<Token>,
    ) -> result::Result<(), NetError> {
        let mut needs_interrupt = false;
        // Process a deferred frame first if available. Don't read from tap again
        // until we manage to receive this deferred frame.
        if self.deferred_rx {
            if rx_single_frame(&mut self.rx_queue, &mut self.rx_buf, self.rx_count) {
                self.deferred_rx = false;
                needs_interrupt = true;
            } else {
                // There is an outstanding deferred frame and the guest has not yet
                // made any buffers available. Remove the tapfd from the poll
                // context until more are made available.
                wait_ctx
                    .delete(&self.tap)
                    .map_err(NetError::EventRemoveTap)?;
                return Ok(());
            }
        }
        needs_interrupt |= self.process_rx_slirp();
        if needs_interrupt {
            self.interrupt.signal_used_queue(self.rx_queue.vector());
        }
        Ok(())
    }

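    /// Handles the guest making new rx buffers available: if a frame was deferred, delivers it,
    /// re-adds the tap to the wait context, and signals the rx queue.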
    pub(in crate::virtio) fn handle_rx_queue(
        &mut self,
        wait_ctx: &WaitContext<Token>,
        _tap_polling_enabled: bool,
    ) -> result::Result<(), NetError> {
        // There should be a buffer available now to receive the frame into.
        if self.deferred_rx && rx_single_frame(&mut self.rx_queue, &mut self.rx_buf, self.rx_count)
        {
            // The guest has made buffers available, so add the tap back to the
            // poll context in case it was removed.
            match wait_ctx.add(&self.tap, Token::RxTap) {
                Ok(_) => {}
                Err(e) if e.errno() == EEXIST => {}
                Err(e) => {
                    return Err(NetError::EventAddTap(e));
                }
            }
            self.deferred_rx = false;
            self.interrupt.signal_used_queue(self.rx_queue.vector());
        }
        Ok(())
    }
}