1 // Copyright 2022 The Chromium OS Authors. All rights reserved. 2 // SPDX-License-Identifier: Apache-2.0 3 4 //! Structs for Tube based endpoint. Listeners are not used with Tubes, since they are essentially 5 //! fancy socket pairs. 6 7 use std::io::{IoSlice, IoSliceMut}; 8 use std::path::Path; 9 use std::ptr::copy_nonoverlapping; 10 11 use base::{AsRawDescriptor, FromRawDescriptor, RawDescriptor, Tube}; 12 use serde::{Deserialize, Serialize}; 13 14 use super::{Error, Result}; 15 use crate::connection::{Endpoint, Listener, Req}; 16 use std::cmp::min; 17 use std::fs::File; 18 use std::marker::PhantomData; 19 20 #[derive(Serialize, Deserialize)] 21 struct RawDescriptorContainer { 22 #[serde(with = "base::with_raw_descriptor")] 23 rd: RawDescriptor, 24 } 25 26 #[derive(Serialize, Deserialize)] 27 struct EndpointMessage { 28 rds: Vec<RawDescriptorContainer>, 29 data: Vec<u8>, 30 } 31 32 /// No-op for Tubes. Tubes are a socketpair() equivalent and cannot be listened for or connected. 33 pub struct TubeListener; 34 35 impl Listener for TubeListener { 36 type Connection = Tube; 37 accept(&mut self) -> Result<Option<Self::Connection>>38 fn accept(&mut self) -> Result<Option<Self::Connection>> { 39 unimplemented!("listeners for Tubes are not used") 40 } 41 set_nonblocking(&self, _block: bool) -> Result<()>42 fn set_nonblocking(&self, _block: bool) -> Result<()> { 43 unimplemented!("listeners for Tubes are not used") 44 } 45 } 46 47 /// Tube endpoint for vhost-user connection. 
pub struct TubeEndpoint<R: Req> {
    tube: Tube,
    // Pins the vhost-user request type (master vs. slave) this endpoint speaks.
    _r: PhantomData<R>,
}

impl<R: Req> From<Tube> for TubeEndpoint<R> {
    fn from(tube: Tube) -> Self {
        Self {
            tube,
            _r: PhantomData,
        }
    }
}

impl<R: Req> Endpoint<R> for TubeEndpoint<R> {
    type Listener = TubeListener;

    fn from_connection(tube: <<Self as Endpoint<R>>::Listener as Listener>::Connection) -> Self {
        Self {
            tube,
            _r: PhantomData,
        }
    }

    /// Path based connection is not supported; Tubes come in pre-connected pairs.
    fn connect<P: AsRef<Path>>(_path: P) -> Result<Self> {
        unimplemented!("connections not supported on Tubes")
    }

    /// Sends bytes from scatter-gather vectors with optional attached file descriptors.
    ///
    /// # Return:
    /// * - number of bytes sent on success
    /// * - TubeError: tube related errors.
    fn send_iovec(&mut self, iovs: &[IoSlice], rds: Option<&[RawDescriptor]>) -> Result<usize> {
        // Flatten the scatter-gather list into one contiguous buffer for Tube transport.
        let total_bytes = iovs.iter().map(|iov| iov.len()).sum();
        let mut data = Vec::with_capacity(total_bytes);
        for iov in iovs {
            data.extend_from_slice(iov);
        }

        let mut msg = EndpointMessage {
            data,
            rds: Vec::with_capacity(rds.map_or(0, |rds| rds.len())),
        };
        if let Some(rds) = rds {
            for rd in rds {
                msg.rds.push(RawDescriptorContainer { rd: *rd });
            }
        }
        self.tube.send(&msg).map_err(Error::TubeError)?;
        Ok(total_bytes)
    }

    /// Reads bytes from the tube into the given scatter/gather vectors with optional attached
    /// file.
    ///
    /// The underlying communication channel is a Tube. Providing too little recv buffer space will
    /// cause data to get dropped (with an error). This is tricky to fix with Tube backing our
    /// transport layer, and as far as we can tell, is not exercised in practice.
    ///
    /// # Return:
    /// * - (number of bytes received, [received files]) on success
    /// * - RecvBufferTooSmall: Input bufs is too small for the received buffer.
    /// * - TubeError: tube related errors.
    fn recv_into_bufs(
        &mut self,
        bufs: &mut [IoSliceMut],
        _allow_rds: bool,
    ) -> Result<(usize, Option<Vec<File>>)> {
        // TODO(b/221882601): implement "allow_rds"

        let msg: EndpointMessage = self.tube.recv().map_err(Error::TubeError)?;

        let files = match msg.rds.len() {
            0 => None,
            // Safe because we own r.rd and it is guaranteed valid.
            _ => Some(
                msg.rds
                    .iter()
                    .map(|r| unsafe { File::from_raw_descriptor(r.rd) })
                    .collect::<Vec<File>>(),
            ),
        };

        // Scatter the payload across the caller's buffers. Safe slice copies replace the
        // previous raw-pointer copy: `copy_count` bounds both the source and destination
        // ranges, so the lengths always match and copy_from_slice cannot panic.
        let mut bytes_read = 0;
        for dest_iov in bufs.iter_mut() {
            if bytes_read >= msg.data.len() {
                // We've read all the available data into the iovecs.
                break;
            }

            let copy_count = min(dest_iov.len(), msg.data.len() - bytes_read);
            dest_iov[..copy_count]
                .copy_from_slice(&msg.data[bytes_read..bytes_read + copy_count]);
            bytes_read += copy_count;
        }

        if bytes_read != msg.data.len() {
            // User didn't supply enough iov space; the remaining bytes (and the message) are
            // dropped. Any received files are closed when `files` is dropped here.
            return Err(Error::RecvBufferTooSmall {
                got: bytes_read,
                want: msg.data.len(),
            });
        }

        Ok((bytes_read, files))
    }
}

impl<R: Req> AsRawDescriptor for TubeEndpoint<R> {
    /// WARNING: this function does not return a waitable descriptor! Use base::ReadNotifier
    /// instead.
as_raw_descriptor(&self) -> RawDescriptor171 fn as_raw_descriptor(&self) -> RawDescriptor { 172 self.tube.as_raw_descriptor() 173 } 174 } 175 176 #[cfg(test)] 177 mod tests { 178 use super::*; 179 use crate::connection::EndpointExt; 180 use crate::message::{MasterReq, VhostUserMsgHeader}; 181 use base::{AsRawDescriptor, Tube}; 182 use std::io::{IoSlice, Read, Seek, SeekFrom, Write}; 183 use std::mem; 184 use tempfile::tempfile; 185 create_pair() -> (TubeEndpoint<MasterReq>, TubeEndpoint<MasterReq>)186 fn create_pair() -> (TubeEndpoint<MasterReq>, TubeEndpoint<MasterReq>) { 187 let (master_tube, slave_tube) = Tube::pair().unwrap(); 188 ( 189 TubeEndpoint::<MasterReq>::from_connection(master_tube), 190 TubeEndpoint::<MasterReq>::from_connection(slave_tube), 191 ) 192 } 193 194 #[test] send_data()195 fn send_data() { 196 let (mut master, mut slave) = create_pair(); 197 198 let buf1 = vec![0x1, 0x2, 0x3, 0x4]; 199 let len = master.send_slice(IoSlice::new(&buf1[..]), None).unwrap(); 200 assert_eq!(len, 4); 201 let (bytes, buf2, _) = slave.recv_into_buf(0x1000).unwrap(); 202 assert_eq!(bytes, 4); 203 assert_eq!(&buf1[..], &buf2[..bytes]); 204 } 205 206 #[test] send_fd()207 fn send_fd() { 208 let (mut master, mut slave) = create_pair(); 209 210 let mut file = tempfile().unwrap(); 211 write!(file, "test").unwrap(); 212 213 // Normal case for sending/receiving file descriptors 214 let buf1 = vec![0x1, 0x2, 0x3, 0x4]; 215 let len = master 216 .send_slice(IoSlice::new(&buf1[..]), Some(&[file.as_raw_descriptor()])) 217 .unwrap(); 218 assert_eq!(len, 4); 219 220 let (bytes, buf2, files) = slave.recv_into_buf(4).unwrap(); 221 assert_eq!(bytes, 4); 222 assert_eq!(&buf1[..], &buf2[..]); 223 assert!(files.is_some()); 224 let files = files.unwrap(); 225 { 226 assert_eq!(files.len(), 1); 227 let mut file = &files[0]; 228 let mut content = String::new(); 229 file.seek(SeekFrom::Start(0)).unwrap(); 230 file.read_to_string(&mut content).unwrap(); 231 assert_eq!(content, "test"); 232 } 
233 234 // Following communication pattern should work: 235 // Sending side: data, data with fds 236 // Receiving side: data, data with fds 237 let len = master.send_slice(IoSlice::new(&buf1[..]), None).unwrap(); 238 assert_eq!(len, 4); 239 let len = master 240 .send_slice( 241 IoSlice::new(&buf1[..]), 242 Some(&[ 243 file.as_raw_descriptor(), 244 file.as_raw_descriptor(), 245 file.as_raw_descriptor(), 246 ]), 247 ) 248 .unwrap(); 249 assert_eq!(len, 4); 250 251 let (bytes, buf2, files) = slave.recv_into_buf(0x4).unwrap(); 252 assert_eq!(bytes, 4); 253 assert_eq!(&buf1[..], &buf2[..]); 254 assert!(files.is_none()); 255 256 let (bytes, buf2, files) = slave.recv_into_buf(0x4).unwrap(); 257 assert_eq!(bytes, 4); 258 assert_eq!(&buf1[..], &buf2[..]); 259 assert!(files.is_some()); 260 let files = files.unwrap(); 261 { 262 assert_eq!(files.len(), 3); 263 let mut file = &files[1]; 264 let mut content = String::new(); 265 file.seek(SeekFrom::Start(0)).unwrap(); 266 file.read_to_string(&mut content).unwrap(); 267 assert_eq!(content, "test"); 268 } 269 270 // If the target fd array is too small, extra file descriptors will get lost. 271 // 272 // Porting note: no, they won't. The FD array is sized to whatever the header says it 273 // should be. 
274 let len = master 275 .send_slice( 276 IoSlice::new(&buf1[..]), 277 Some(&[ 278 file.as_raw_descriptor(), 279 file.as_raw_descriptor(), 280 file.as_raw_descriptor(), 281 ]), 282 ) 283 .unwrap(); 284 assert_eq!(len, 4); 285 286 let (bytes, _, files) = slave.recv_into_buf(0x4).unwrap(); 287 assert_eq!(bytes, 4); 288 assert!(files.is_some()); 289 } 290 291 #[test] send_recv()292 fn send_recv() { 293 let (mut master, mut slave) = create_pair(); 294 295 let mut hdr1 = 296 VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, mem::size_of::<u64>() as u32); 297 hdr1.set_need_reply(true); 298 let features1 = 0x1u64; 299 master.send_message(&hdr1, &features1, None).unwrap(); 300 301 let mut features2 = 0u64; 302 let slice = unsafe { 303 std::slice::from_raw_parts_mut( 304 (&mut features2 as *mut u64) as *mut u8, 305 mem::size_of::<u64>(), 306 ) 307 }; 308 let (hdr2, bytes, files) = slave.recv_body_into_buf(slice).unwrap(); 309 assert_eq!(hdr1, hdr2); 310 assert_eq!(bytes, 8); 311 assert_eq!(features1, features2); 312 assert!(files.is_none()); 313 314 master.send_header(&hdr1, None).unwrap(); 315 let (hdr2, files) = slave.recv_header().unwrap(); 316 assert_eq!(hdr1, hdr2); 317 assert!(files.is_none()); 318 } 319 } 320