// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Traits and structs for the vhost-user master.

use std::fs::File;
use std::mem;
use std::path::Path;
use std::sync::Arc;
use std::sync::Mutex;
use std::sync::MutexGuard;

use base::AsRawDescriptor;
use base::Event;
use base::RawDescriptor;
use base::INVALID_DESCRIPTOR;
use data_model::zerocopy_from_reader;
use data_model::DataInit;

use crate::backend::VhostBackend;
use crate::backend::VhostUserMemoryRegionInfo;
use crate::backend::VringConfigData;
use crate::connection::Endpoint;
use crate::connection::EndpointExt;
use crate::message::*;
use crate::take_single_file;
use crate::Error as VhostUserError;
use crate::Result as VhostUserResult;
use crate::Result;
use crate::SystemStream;

/// Trait for the vhost-user master to provide extra methods not covered by the VhostBackend yet.
pub trait VhostUserMaster: VhostBackend {
    /// Get the protocol feature bitmask from the underlying vhost implementation.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures>;

    /// Enable protocol features in the underlying vhost implementation.
    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()>;

    /// Query how many queues the backend supports.
    fn get_queue_num(&mut self) -> Result<u64>;

    /// Signal the slave to enable or disable the corresponding vring.
    ///
    /// The slave must not pass data to/from the backend until the ring has been enabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been disabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 0.
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;

    /// Fetch the contents of the virtio device configuration space.
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)>;

    /// Change the virtio device configuration space. It can also be used for live migration on
    /// the destination host to set read-only configuration space fields.
    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>;

    /// Set up the slave communication channel.
    fn set_slave_request_fd(&mut self, fd: &dyn AsRawDescriptor) -> Result<()>;

    /// Retrieve the shared buffer for inflight I/O tracking.
    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)>;

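    // Inflight I/O tracking: when VhostUserProtocolFeatures::INFLIGHT_SHMFD has been
    // negotiated, the slave allocates a shared buffer describing in-flight descriptors.
    // get_inflight_fd() fetches that buffer and set_inflight_fd() hands it back (e.g. after
    // reconnecting to a restarted slave) so pending requests can be resubmitted.
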
    /// Set the shared buffer for inflight I/O tracking.
    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawDescriptor) -> Result<()>;

    /// Query the maximum number of memory slots supported by the backend.
    fn get_max_mem_slots(&mut self) -> Result<u64>;

    /// Add a new guest memory mapping for vhost to use.
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;

    /// Remove a guest memory mapping from vhost.
    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;

    /// Gets the shared memory regions used by the device.
    fn get_shared_memory_regions(&self) -> Result<Vec<VhostSharedMemoryRegion>>;
}

/// Struct for the vhost-user master endpoint.
#[derive(Clone)]
pub struct Master<E: Endpoint<MasterReq>> {
    node: Arc<Mutex<MasterInternal<E>>>,
}

impl<E: Endpoint<MasterReq> + From<SystemStream>> Master<E> {
    /// Create a new instance from a Unix stream socket.
    pub fn from_stream(sock: SystemStream, max_queue_num: u64) -> Self {
        Self::new(E::from(sock), max_queue_num)
    }
}

impl<E: Endpoint<MasterReq>> Master<E> {
    /// Create a new instance.
    fn new(ep: E, max_queue_num: u64) -> Self {
        Master {
            node: Arc::new(Mutex::new(MasterInternal {
                main_sock: ep,
                virtio_features: 0,
                acked_virtio_features: 0,
                protocol_features: 0,
                acked_protocol_features: 0,
                protocol_features_ready: false,
                max_queue_num,
                hdr_flags: VhostUserHeaderFlag::empty(),
            })),
        }
    }

    fn node(&self) -> MutexGuard<MasterInternal<E>> {
        self.node.lock().unwrap()
    }

    /// Create a new vhost-user master endpoint.
    ///
    /// Will retry as the backend may not be ready to accept the connection.
    ///
    /// # Arguments
    /// * `path` - path of the Unix domain socket listener to connect to
    pub fn connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self> {
        let mut retry_count = 5;
        let endpoint = loop {
            match E::connect(&path) {
                Ok(endpoint) => break Ok(endpoint),
                Err(e) => match &e {
                    VhostUserError::SocketConnect(why) => {
                        if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 {
                            std::thread::sleep(std::time::Duration::from_millis(100));
                            retry_count -= 1;
                            continue;
                        } else {
                            break Err(e);
                        }
                    }
                    _ => break Err(e),
                },
            }
        }?;

        Ok(Self::new(endpoint, max_queue_num))
    }

    /// Set the header flags that should be applied to all following messages.
    pub fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
        let mut node = self.node();
        node.hdr_flags = flags;
    }
}
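
// A minimal sketch (not part of the upstream API) of the initial handshake a frontend might
// drive through `Master`. Error handling and device-specific feature masking are elided; a
// real VMM intersects the offered features with what its device model supports.
#[allow(dead_code)]
fn example_handshake<E: Endpoint<MasterReq>>(master: &mut Master<E>) -> Result<()> {
    // Claim the session before any other configuration.
    master.set_owner()?;
    // Fetch the virtio feature bitmask offered by the backend and ack it unchanged.
    let features = master.get_features()?;
    master.set_features(features)?;
    // Protocol features are only valid once VHOST_USER_F_PROTOCOL_FEATURES was offered.
    if features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() != 0 {
        let pfeatures = master.get_protocol_features()?;
        master.set_protocol_features(pfeatures)?;
    }
    Ok(())
}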

impl<E: Endpoint<MasterReq>> VhostBackend for Master<E> {
    /// Get the feature bitmask from the underlying vhost implementation.
    fn get_features(&self) -> Result<u64> {
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.virtio_features = val.value;
        Ok(node.virtio_features)
    }

    /// Enable features in the underlying vhost implementation using a bitmask.
    fn set_features(&self, features: u64) -> Result<()> {
        let mut node = self.node();
        let val = VhostUserU64::new(features);
        let hdr = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
        node.acked_virtio_features = features & node.virtio_features;
        node.wait_for_ack(&hdr)
    }

    /// Set the current Master as an owner of the session.
    fn set_owner(&self) -> Result<()> {
        // We unwrap() the return value to assert that we are not expecting threads to ever fail
        // while holding the lock.
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::SET_OWNER, None)?;
        node.wait_for_ack(&hdr)
    }

    fn reset_owner(&self) -> Result<()> {
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::RESET_OWNER, None)?;
        node.wait_for_ack(&hdr)
    }
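
    // Wire format note: the SET_MEM_TABLE body is a `VhostUserMemory` header carrying the
    // region count, followed by one `VhostUserMemoryRegion` entry per region; the mmap
    // descriptors travel out-of-band as ancillary data, in the same order as the entries.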
    /// Set the memory map regions on the slave so it can translate the vring addresses.
    /// In the ancillary data there is an array of file descriptors, one per memory region.
    fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
        if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
            return Err(VhostUserError::InvalidParam);
        }

        let mut ctx = VhostUserMemoryContext::new();
        for region in regions.iter() {
            // TODO(b/221882601): once an mmap handle cross-platform story exists, update this
            // null check.
            if region.memory_size == 0 || (region.mmap_handle as isize) < 0 {
                return Err(VhostUserError::InvalidParam);
            }
            let reg = VhostUserMemoryRegion {
                guest_phys_addr: region.guest_phys_addr,
                memory_size: region.memory_size,
                user_addr: region.userspace_addr,
                mmap_offset: region.mmap_offset,
            };
            ctx.append(&reg, region.mmap_handle);
        }

        let mut node = self.node();
        let body = VhostUserMemory::new(ctx.regions.len() as u32);
        // Safe because `ctx.regions` is a vector of POD structs, so reinterpreting its contents
        // as bytes cannot produce invalid values.
        let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
        let hdr = node.send_request_with_payload(
            MasterReq::SET_MEM_TABLE,
            &body,
            payload,
            Some(ctx.fds.as_slice()),
        )?;
        node.wait_for_ack(&hdr)
    }

    // Clippy doesn't seem to know that `if let` with `&&` is still experimental.
    #[allow(clippy::unnecessary_unwrap)]
    fn set_log_base(&self, base: u64, fd: Option<RawDescriptor>) -> Result<()> {
        let mut node = self.node();
        let val = VhostUserU64::new(base);

        if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
            && fd.is_some()
        {
            let fds = [fd.unwrap()];
            let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, Some(&fds))?;
        } else {
            let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, None)?;
        }
        Ok(())
    }

    fn set_log_fd(&self, fd: RawDescriptor) -> Result<()> {
        let mut node = self.node();
        let fds = [fd];
        let hdr = node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
        node.wait_for_ack(&hdr)
    }

    /// Set the size of the queue.
    fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringState::new(queue_index as u32, num.into());
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
        node.wait_for_ack(&hdr)
    }

    /// Sets the addresses of the different aspects of the vring.
    fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num
            || config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
        {
            return Err(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
        node.wait_for_ack(&hdr)
    }

    /// Sets the base offset in the available vring.
    fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringState::new(queue_index as u32, base.into());
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
        node.wait_for_ack(&hdr)
    }

    fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }

        let req = VhostUserVringState::new(queue_index as u32, 0);
        let hdr = node.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
        let reply = node.recv_reply::<VhostUserVringState>(&hdr)?;
        Ok(reply.num)
    }

    /// Set the event file descriptor to signal when buffers are used.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This
    /// flag is set when there is no file descriptor in the ancillary data. This signals that
    /// polling will be used instead of waiting for the call.
    fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(
            MasterReq::SET_VRING_CALL,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        node.wait_for_ack(&hdr)
    }

    /// Set the event file descriptor for adding buffers to the vring.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This
    /// flag is set when there is no file descriptor in the ancillary data. This signals that
    /// polling should be used instead of waiting for a kick.
    fn set_vring_kick(&self, queue_index: usize, event: &Event) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(
            MasterReq::SET_VRING_KICK,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        node.wait_for_ack(&hdr)
    }

    /// Set the event file descriptor to signal when an error occurs.
    /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This
    /// flag is set when there is no file descriptor in the ancillary data.
    fn set_vring_err(&self, queue_index: usize, event: &Event) -> Result<()> {
        let mut node = self.node();
        if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        let hdr = node.send_fd_for_vring(
            MasterReq::SET_VRING_ERR,
            queue_index,
            event.as_raw_descriptor(),
        )?;
        node.wait_for_ack(&hdr)
    }
}
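
// A minimal sketch (not upstream code) of the per-queue setup sequence a frontend typically
// performs after feature negotiation. The queue size of 256 and base of 0 are example values;
// real ones come from the virtio device model, as does `config_data`.
#[allow(dead_code)]
fn example_setup_vring<E: Endpoint<MasterReq>>(
    master: &Master<E>,
    queue_index: usize,
    config_data: &VringConfigData,
    call_evt: &Event,
    kick_evt: &Event,
) -> Result<()> {
    // Queue size first, then the descriptor table/used/available ring addresses.
    master.set_vring_num(queue_index, 256)?;
    master.set_vring_addr(queue_index, config_data)?;
    // Start consuming from the beginning of the available ring.
    master.set_vring_base(queue_index, 0)?;
    // Eventfds: the slave signals `call_evt` when buffers are used and waits on `kick_evt`
    // for newly added buffers.
    master.set_vring_call(queue_index, call_evt)?;
    master.set_vring_kick(queue_index, kick_evt)?;
    Ok(())
}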

impl<E: Endpoint<MasterReq>> VhostUserMaster for Master<E> {
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
        let mut node = self.node();
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let hdr = node.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.protocol_features = val.value;
        // Should we support forward compatibility?
        // If so, just mask out unrecognized flags instead of returning errors.
        match VhostUserProtocolFeatures::from_bits(node.protocol_features) {
            Some(val) => Ok(val),
            None => Err(VhostUserError::InvalidMessage),
        }
    }

    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
        let mut node = self.node();
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        if features.contains(VhostUserProtocolFeatures::SHARED_MEMORY_REGIONS)
            && !features.contains(VhostUserProtocolFeatures::SLAVE_REQ)
        {
            return Err(VhostUserError::FeatureMismatch);
        }
        let val = VhostUserU64::new(features.bits());
        let hdr = node.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
        // Don't expect an ACK here because the protocol feature negotiation process hasn't been
        // completed yet; wait_for_ack() below returns immediately unless the caller explicitly
        // requested NEED_REPLY via set_hdr_flags().
        node.acked_protocol_features = features.bits();
        node.protocol_features_ready = true;
        node.wait_for_ack(&hdr)
    }

    fn get_queue_num(&mut self) -> Result<u64> {
        let mut node = self.node();
        if !node.is_feature_mq_available() {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        if val.value > VHOST_USER_MAX_VRINGS {
            return Err(VhostUserError::InvalidMessage);
        }
        node.max_queue_num = val.value;
        Ok(node.max_queue_num)
    }

    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
        let mut node = self.node();
        // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
        if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        } else if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }

        let val = VhostUserVringState::new(queue_index as u32, enable.into());
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
        node.wait_for_ack(&hdr)
    }

    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> {
        let body = VhostUserConfig::new(offset, size, flags);
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // Depends on VhostUserProtocolFeatures::CONFIG.
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        // The vhost-user spec states that:
        // "Master payload: virtio device config space"
        // "Slave payload: virtio device config space"
        let hdr = node.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
        let (body_reply, buf_reply, rfds) = node.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
        if rfds.is_some() {
            return Err(VhostUserError::InvalidMessage);
        } else if body_reply.size == 0 {
            return Err(VhostUserError::SlaveInternalError);
        } else if body_reply.size != body.size
            || body_reply.size as usize != buf.len()
            || body_reply.offset != body.offset
        {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body_reply, buf_reply))
    }

    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> {
        if buf.len() > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        let body = VhostUserConfig::new(offset, buf.len() as u32, flags);
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // Depends on VhostUserProtocolFeatures::CONFIG.
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
        node.wait_for_ack(&hdr)
    }

    fn set_slave_request_fd(&mut self, fd: &dyn AsRawDescriptor) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let fds = [fd.as_raw_descriptor()];
        let hdr = node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
        node.wait_for_ack(&hdr)
    }

    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_with_body(MasterReq::GET_INFLIGHT_FD, inflight, None)?;
        let (inflight, files) = node.recv_reply_with_files::<VhostUserInflight>(&hdr)?;

        match take_single_file(files) {
            Some(file) => Ok((inflight, file)),
            None => Err(VhostUserError::IncorrectFds),
        }
    }

    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawDescriptor) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        if inflight.mmap_size == 0
            || inflight.num_queues == 0
            || inflight.queue_size == 0
            || fd == INVALID_DESCRIPTOR
        {
            return Err(VhostUserError::InvalidParam);
        }

        let hdr = node.send_request_with_body(MasterReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
        node.wait_for_ack(&hdr)
    }

    fn get_max_mem_slots(&mut self) -> Result<u64> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;

        Ok(val.value)
    }
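
    // With VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS negotiated, memory regions can be
    // added and removed incrementally via ADD_MEM_REG/REM_MEM_REG instead of resending the
    // whole table through SET_MEM_TABLE, up to the limit reported by get_max_mem_slots().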
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }
        // TODO(b/221882601): once an mmap handle cross-platform story exists, update this null
        // check.
        if region.memory_size == 0 || (region.mmap_handle as isize) < 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let fds = [region.mmap_handle];
        let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
        node.wait_for_ack(&hdr)
    }

    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }
        if region.memory_size == 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
        node.wait_for_ack(&hdr)
    }

    fn get_shared_memory_regions(&self) -> Result<Vec<VhostSharedMemoryRegion>> {
        let mut node = self.node();
        let hdr = node.send_request_header(MasterReq::GET_SHARED_MEMORY_REGIONS, None)?;
        let (body_reply, buf_reply, rfds) = node.recv_reply_with_payload::<VhostUserU64>(&hdr)?;
        let struct_size = mem::size_of::<VhostSharedMemoryRegion>();
        if rfds.is_some() || buf_reply.len() != body_reply.value as usize * struct_size {
            return Err(VhostUserError::InvalidMessage);
        }
        let mut regions = Vec::new();
        let mut offset = 0;
        for _ in 0..body_reply.value {
            regions.push(
                // Can't fail because the input is the correct size.
                zerocopy_from_reader(&buf_reply[offset..(offset + struct_size)]).unwrap(),
            );
            offset += struct_size;
        }
        Ok(regions)
    }
}

impl<E: Endpoint<MasterReq> + AsRawDescriptor> AsRawDescriptor for Master<E> {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        let node = self.node();
        // TODO(b/221882601): why is this here? The underlying Tube needs to use a read notifier
        // if this is for polling.
        node.main_sock.as_raw_descriptor()
    }
}

// TODO(b/221882601): we likely need pairs of RawDescriptors and/or SharedMemory to represent
// mmaps on Windows.
/// Context object to pass guest memory configuration to VhostUserMaster::set_mem_table().
struct VhostUserMemoryContext {
    regions: VhostUserMemoryPayload,
    fds: Vec<RawDescriptor>,
}

impl VhostUserMemoryContext {
    /// Create a context object.
    pub fn new() -> Self {
        VhostUserMemoryContext {
            regions: VhostUserMemoryPayload::new(),
            fds: Vec::new(),
        }
    }

    /// Append a user memory region and corresponding RawDescriptor into the context object.
    pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawDescriptor) {
        self.regions.push(*region);
        self.fds.push(fd);
    }
}
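
// A minimal sketch (not upstream code) of building a memory table for `set_mem_table()`. The
// guest physical address, size, and userspace address are placeholder values; a real caller
// passes the descriptor and layout of the shared memory actually backing guest RAM.
#[allow(dead_code)]
fn example_set_mem_table<E: Endpoint<MasterReq>>(
    master: &Master<E>,
    shm: &dyn AsRawDescriptor,
) -> Result<()> {
    let mut region = VhostUserMemoryRegionInfo::default();
    region.guest_phys_addr = 0;
    region.memory_size = 0x10_0000;
    region.userspace_addr = 0;
    region.mmap_offset = 0;
    region.mmap_handle = shm.as_raw_descriptor();
    // One descriptor is attached per region, in the same order as the region entries.
    master.set_mem_table(&[region])
}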

struct MasterInternal<E: Endpoint<MasterReq>> {
    // Used to send requests to the slave.
    main_sock: E,
    // Cached virtio features from the slave.
    virtio_features: u64,
    // Cached acked virtio features from the driver.
    acked_virtio_features: u64,
    // Cached vhost-user protocol features from the slave.
    protocol_features: u64,
    // Cached acked vhost-user protocol features.
    acked_protocol_features: u64,
    // Whether the cached vhost-user protocol features are ready to use.
    protocol_features_ready: bool,
    // Cached maximum number of queues supported by the slave.
    max_queue_num: u64,
    // List of header flags.
    hdr_flags: VhostUserHeaderFlag,
}

impl<E: Endpoint<MasterReq>> MasterInternal<E> {
    fn send_request_header(
        &mut self,
        code: MasterReq,
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        let hdr = self.new_request_header(code, 0);
        self.main_sock.send_header(&hdr, fds)?;
        Ok(hdr)
    }

    fn send_request_with_body<T: Sized + DataInit>(
        &mut self,
        code: MasterReq,
        msg: &T,
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if mem::size_of::<T>() > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        let hdr = self.new_request_header(code, mem::size_of::<T>() as u32);
        self.main_sock.send_message(&hdr, msg, fds)?;
        Ok(hdr)
    }

    fn send_request_with_payload<T: Sized + DataInit>(
        &mut self,
        code: MasterReq,
        msg: &T,
        payload: &[u8],
        fds: Option<&[RawDescriptor]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        let len = mem::size_of::<T>() + payload.len();
        if len > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        if let Some(fd_arr) = fds {
            if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
                return Err(VhostUserError::InvalidParam);
            }
        }
        let hdr = self.new_request_header(code, len as u32);
        self.main_sock
            .send_message_with_payload(&hdr, msg, payload, fds)?;
        Ok(hdr)
    }

    fn send_fd_for_vring(
        &mut self,
        code: MasterReq,
        queue_index: usize,
        fd: RawDescriptor,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if queue_index as u64 >= self.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
        // This flag is set when there is no file descriptor in the ancillary data. This signals
        // that polling will be used instead of waiting for the call.
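        // For example, queue index 2 with a descriptor attached is encoded as 0x2; with no
        // descriptor it would be 0x102 (bit 8, the invalid-FD flag, set). This implementation
        // always attaches a descriptor, so the flag is never set here.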
        let msg = VhostUserU64::new(queue_index as u64);
        let hdr = self.new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
        self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?;
        Ok(hdr)
    }

    fn recv_reply<T: Sized + DataInit + Default + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<T> {
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }
        let (reply, body, rfds) = self.main_sock.recv_body::<T>()?;
        if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok(body)
    }

    fn recv_reply_with_files<T: Sized + DataInit + Default + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<(T, Option<Vec<File>>)> {
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }

        let (reply, body, files) = self.main_sock.recv_body::<T>()?;
        if !reply.is_reply_for(hdr) || files.is_none() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        Ok((body, files))
    }

    fn recv_reply_with_payload<T: Sized + DataInit + Default + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<(T, Vec<u8>, Option<Vec<File>>)> {
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }

        let (reply, body, buf, files) = self.main_sock.recv_payload_into_buf::<T>()?;
        if !reply.is_reply_for(hdr) || files.is_some() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body, buf, files))
    }

    fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
            || !hdr.is_need_reply()
        {
            return Ok(());
        }

        let (reply, body, rfds) = self.main_sock.recv_body::<VhostUserU64>()?;
        if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
            return Err(VhostUserError::InvalidMessage);
        }
        if body.value != 0 {
            return Err(VhostUserError::SlaveInternalError);
        }
        Ok(())
    }

    fn is_feature_mq_available(&self) -> bool {
        self.acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0
    }

    #[inline]
    fn new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
        // The low bits of the header flags carry the vhost-user protocol version, which is
        // always 0x1.
        VhostUserMsgHeader::new(request, self.hdr_flags.bits() | 0x1, size)
    }
}

#[cfg(test)]
mod tests {
    use base::INVALID_DESCRIPTOR;

    use super::*;
    use crate::connection::tests::create_pair;
    use crate::connection::tests::TestEndpoint;
    use crate::connection::tests::TestMaster;

    #[test]
    fn create_master() {
        let (master, mut slave) = create_pair();

        assert!(master.as_raw_descriptor() != INVALID_DESCRIPTOR);
        // Send two messages continuously.
        master.set_owner().unwrap();
        master.reset_owner().unwrap();

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::RESET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());
    }

    #[test]
    fn test_features() {
        let (master, mut peer) = create_pair();

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, 0x15u64);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        master.set_features(0x15).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, 0x15);

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = 0x15u32;
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_features().is_err());
    }

    #[test]
    fn test_protocol_features() {
        let (mut master, mut peer) = create_pair();

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert!(rfds.is_none());

        assert!(master.get_protocol_features().is_err());
        assert!(master
            .set_protocol_features(VhostUserProtocolFeatures::all())
            .is_err());

        let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(vfeatures);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, vfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_features(vfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, vfeatures);

        let pfeatures = VhostUserProtocolFeatures::all();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_protocol_features().unwrap();
        assert_eq!(features, pfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_protocol_features(pfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, pfeatures.bits());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_protocol_features().is_err());
    }

    #[test]
    fn test_master_set_config_negative() {
        let (mut master, _peer) = create_pair();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        master
            .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();

        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        master
            .set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap();
        master
            .set_config(
                VHOST_USER_CONFIG_SIZE,
                VhostUserConfigFlags::WRITABLE,
                &buf[0..4],
            )
            .unwrap_err();
        master
            .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();
        master
            .set_config(
                0x100,
                unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
                &buf[0..4],
            )
            .unwrap_err();
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
            .unwrap_err();
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
            .unwrap_err();
    }

    fn create_pair2() -> (TestMaster, TestEndpoint) {
        let (master, peer) = create_pair();
        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        (master, peer)
    }

    #[test]
    fn test_master_get_config_negative0() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_code(MasterReq::GET_FEATURES);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
        hdr.set_code(MasterReq::GET_CONFIG);
    }

    #[test]
    fn test_master_get_config_negative1() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_reply(false);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative2() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());
    }

    #[test]
    fn test_master_get_config_negative3() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative4() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0x101;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative5() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = (MAX_MSG_SIZE + 1) as u32;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_get_config_negative6() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.size = 6;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    #[test]
    fn test_master_set_mem_table_failure() {
        let (master, _peer) = create_pair2();

        // An empty region list is rejected.
        master.set_mem_table(&[]).unwrap_err();
        // So is a table with more regions than descriptors that can be attached to a message.
        let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
        master.set_mem_table(&tables).unwrap_err();
    }
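
    // A small sanity check added as a sketch (not from upstream): verify the wire format of
    // SET_VRING_NUM. This assumes create_pair()'s default queue count allows queue index 0 and
    // that REPLY_ACK has not been negotiated, so no reply needs to be injected.
    #[test]
    fn test_set_vring_num_message() {
        let (master, mut peer) = create_pair();

        master.set_vring_num(0, 256).unwrap();
        let (hdr, msg, rfds) = peer.recv_body::<VhostUserVringState>().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_VRING_NUM);
        assert!(rfds.is_none());
        assert_eq!(msg.num, 256);
    }
}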