1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 // SPDX-License-Identifier: Apache-2.0
3
4 //! Traits and Struct for vhost-user master.
5
6 use std::mem;
7 use std::os::unix::io::{AsRawFd, RawFd};
8 use std::os::unix::net::UnixStream;
9 use std::path::Path;
10 use std::sync::{Arc, Mutex, MutexGuard};
11
12 use sys_util::EventFd;
13
14 use super::connection::Endpoint;
15 use super::message::*;
16 use super::{Error as VhostUserError, Result as VhostUserResult};
17 use crate::backend::{VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
18 use crate::{Error, Result};
19
/// Trait for vhost-user master to provide extra methods not covered by the VhostBackend yet.
pub trait VhostUserMaster: VhostBackend {
    /// Get the protocol feature bitmask from the underlying vhost implementation.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures>;

    /// Enable protocol features in the underlying vhost implementation.
    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()>;

    /// Query how many queues the backend supports.
    fn get_queue_num(&mut self) -> Result<u64>;

    /// Signal slave to enable or disable corresponding vring.
    ///
    /// Slave must not pass data to/from the backend until ring is enabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been
    /// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;

    /// Fetch the contents of the virtio device configuration space.
    ///
    /// `buf` is the master-side payload sent with the request; the reply must
    /// echo back the same offset and size or an error is returned.
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)>;

    /// Change the virtio device configuration space. It also can be used for live migration on the
    /// destination host to set readonly configuration space fields.
    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>;

    /// Setup slave communication channel by passing `fd` as ancillary data.
    fn set_slave_request_fd(&mut self, fd: RawFd) -> Result<()>;

    /// Query the maximum amount of memory slots supported by the backend.
    fn get_max_mem_slots(&mut self) -> Result<u64>;

    /// Add a new guest memory mapping for vhost to use.
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;

    /// Remove a guest memory mapping from vhost.
    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
}
63
error_code<T>(err: VhostUserError) -> Result<T>64 fn error_code<T>(err: VhostUserError) -> Result<T> {
65 Err(Error::VhostUserProtocol(err))
66 }
67
/// Struct for the vhost-user master endpoint.
///
/// Cloning a `Master` is cheap: all clones share the same connection state
/// through an `Arc<Mutex<_>>`.
#[derive(Clone)]
pub struct Master {
    // Shared, lock-protected socket and negotiated-feature cache.
    node: Arc<Mutex<MasterInternal>>,
}
73
74 impl Master {
75 /// Create a new instance.
new(ep: Endpoint<MasterReq>, max_queue_num: u64) -> Self76 fn new(ep: Endpoint<MasterReq>, max_queue_num: u64) -> Self {
77 Master {
78 node: Arc::new(Mutex::new(MasterInternal {
79 main_sock: ep,
80 virtio_features: 0,
81 acked_virtio_features: 0,
82 protocol_features: 0,
83 acked_protocol_features: 0,
84 protocol_features_ready: false,
85 max_queue_num,
86 error: None,
87 })),
88 }
89 }
90
node(&self) -> MutexGuard<MasterInternal>91 fn node(&self) -> MutexGuard<MasterInternal> {
92 self.node.lock().unwrap()
93 }
94
95 /// Create a new instance from a Unix stream socket.
from_stream(sock: UnixStream, max_queue_num: u64) -> Self96 pub fn from_stream(sock: UnixStream, max_queue_num: u64) -> Self {
97 Self::new(Endpoint::<MasterReq>::from_stream(sock), max_queue_num)
98 }
99
100 /// Create a new vhost-user master endpoint.
101 ///
102 /// Will retry as the backend may not be ready to accept the connection.
103 ///
104 /// # Arguments
105 /// * `path` - path of Unix domain socket listener to connect to
connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self>106 pub fn connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self> {
107 let mut retry_count = 5;
108 let endpoint = loop {
109 match Endpoint::<MasterReq>::connect(&path) {
110 Ok(endpoint) => break Ok(endpoint),
111 Err(e) => match &e {
112 VhostUserError::SocketConnect(why) => {
113 if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 {
114 std::thread::sleep(std::time::Duration::from_millis(100));
115 retry_count -= 1;
116 continue;
117 } else {
118 break Err(e);
119 }
120 }
121 _ => break Err(e),
122 },
123 }
124 }?;
125
126 Ok(Self::new(endpoint, max_queue_num))
127 }
128 }
129
130 impl VhostBackend for Master {
131 /// Get from the underlying vhost implementation the feature bitmask.
get_features(&self) -> Result<u64>132 fn get_features(&self) -> Result<u64> {
133 let mut node = self.node();
134 let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
135 let val = node.recv_reply::<VhostUserU64>(&hdr)?;
136 node.virtio_features = val.value;
137 Ok(node.virtio_features)
138 }
139
140 /// Enable features in the underlying vhost implementation using a bitmask.
set_features(&self, features: u64) -> Result<()>141 fn set_features(&self, features: u64) -> Result<()> {
142 let mut node = self.node();
143 let val = VhostUserU64::new(features);
144 let _ = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
145 // Don't wait for ACK here because the protocol feature negotiation process hasn't been
146 // completed yet.
147 node.acked_virtio_features = features & node.virtio_features;
148 Ok(())
149 }
150
151 /// Set the current Master as an owner of the session.
set_owner(&self) -> Result<()>152 fn set_owner(&self) -> Result<()> {
153 // We unwrap() the return value to assert that we are not expecting threads to ever fail
154 // while holding the lock.
155 let mut node = self.node();
156 let _ = node.send_request_header(MasterReq::SET_OWNER, None)?;
157 // Don't wait for ACK here because the protocol feature negotiation process hasn't been
158 // completed yet.
159 Ok(())
160 }
161
reset_owner(&self) -> Result<()>162 fn reset_owner(&self) -> Result<()> {
163 let mut node = self.node();
164 let _ = node.send_request_header(MasterReq::RESET_OWNER, None)?;
165 // Don't wait for ACK here because the protocol feature negotiation process hasn't been
166 // completed yet.
167 Ok(())
168 }
169
170 /// Set the memory map regions on the slave so it can translate the vring
171 /// addresses. In the ancillary data there is an array of file descriptors
set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>172 fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
173 if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
174 return error_code(VhostUserError::InvalidParam);
175 }
176
177 let mut ctx = VhostUserMemoryContext::new();
178 for region in regions.iter() {
179 if region.memory_size == 0 || region.mmap_handle < 0 {
180 return error_code(VhostUserError::InvalidParam);
181 }
182 let reg = VhostUserMemoryRegion {
183 guest_phys_addr: region.guest_phys_addr,
184 memory_size: region.memory_size,
185 user_addr: region.userspace_addr,
186 mmap_offset: region.mmap_offset,
187 };
188 ctx.append(®, region.mmap_handle);
189 }
190
191 let mut node = self.node();
192 let body = VhostUserMemory::new(ctx.regions.len() as u32);
193 let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
194 let hdr = node.send_request_with_payload(
195 MasterReq::SET_MEM_TABLE,
196 &body,
197 payload,
198 Some(ctx.fds.as_slice()),
199 )?;
200 node.wait_for_ack(&hdr).map_err(|e| e.into())
201 }
202
203 // Clippy doesn't seem to know that if let with && is still experimental
204 #[allow(clippy::unnecessary_unwrap)]
set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()>205 fn set_log_base(&self, base: u64, fd: Option<RawFd>) -> Result<()> {
206 let mut node = self.node();
207 let val = VhostUserU64::new(base);
208
209 if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
210 && fd.is_some()
211 {
212 let fds = [fd.unwrap()];
213 let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, Some(&fds))?;
214 } else {
215 let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, None)?;
216 }
217 Ok(())
218 }
219
set_log_fd(&self, fd: RawFd) -> Result<()>220 fn set_log_fd(&self, fd: RawFd) -> Result<()> {
221 let mut node = self.node();
222 let fds = [fd];
223 node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
224 Ok(())
225 }
226
227 /// Set the size of the queue.
set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>228 fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
229 let mut node = self.node();
230 if queue_index as u64 >= node.max_queue_num {
231 return error_code(VhostUserError::InvalidParam);
232 }
233
234 let val = VhostUserVringState::new(queue_index as u32, num.into());
235 let hdr = node.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
236 node.wait_for_ack(&hdr).map_err(|e| e.into())
237 }
238
239 /// Sets the addresses of the different aspects of the vring.
set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>240 fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
241 let mut node = self.node();
242 if queue_index as u64 >= node.max_queue_num
243 || config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
244 {
245 return error_code(VhostUserError::InvalidParam);
246 }
247
248 let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
249 let hdr = node.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
250 node.wait_for_ack(&hdr).map_err(|e| e.into())
251 }
252
253 /// Sets the base offset in the available vring.
set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>254 fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
255 let mut node = self.node();
256 if queue_index as u64 >= node.max_queue_num {
257 return error_code(VhostUserError::InvalidParam);
258 }
259
260 let val = VhostUserVringState::new(queue_index as u32, base.into());
261 let hdr = node.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
262 node.wait_for_ack(&hdr).map_err(|e| e.into())
263 }
264
get_vring_base(&self, queue_index: usize) -> Result<u32>265 fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
266 let mut node = self.node();
267 if queue_index as u64 >= node.max_queue_num {
268 return error_code(VhostUserError::InvalidParam);
269 }
270
271 let req = VhostUserVringState::new(queue_index as u32, 0);
272 let hdr = node.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
273 let reply = node.recv_reply::<VhostUserVringState>(&hdr)?;
274 Ok(reply.num)
275 }
276
277 /// Set the event file descriptor to signal when buffers are used.
278 /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
279 /// is set when there is no file descriptor in the ancillary data. This signals that polling
280 /// will be used instead of waiting for the call.
set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()>281 fn set_vring_call(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
282 let mut node = self.node();
283 if queue_index as u64 >= node.max_queue_num {
284 return error_code(VhostUserError::InvalidParam);
285 }
286 node.send_fd_for_vring(MasterReq::SET_VRING_CALL, queue_index, fd.as_raw_fd())?;
287 Ok(())
288 }
289
290 /// Set the event file descriptor for adding buffers to the vring.
291 /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
292 /// is set when there is no file descriptor in the ancillary data. This signals that polling
293 /// should be used instead of waiting for a kick.
set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()>294 fn set_vring_kick(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
295 let mut node = self.node();
296 if queue_index as u64 >= node.max_queue_num {
297 return error_code(VhostUserError::InvalidParam);
298 }
299 node.send_fd_for_vring(MasterReq::SET_VRING_KICK, queue_index, fd.as_raw_fd())?;
300 Ok(())
301 }
302
303 /// Set the event file descriptor to signal when error occurs.
304 /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
305 /// is set when there is no file descriptor in the ancillary data.
set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()>306 fn set_vring_err(&self, queue_index: usize, fd: &EventFd) -> Result<()> {
307 let mut node = self.node();
308 if queue_index as u64 >= node.max_queue_num {
309 return error_code(VhostUserError::InvalidParam);
310 }
311 node.send_fd_for_vring(MasterReq::SET_VRING_ERR, queue_index, fd.as_raw_fd())?;
312 Ok(())
313 }
314 }
315
impl VhostUserMaster for Master {
    /// Fetch and cache the slave's protocol feature bitmask.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
        let mut node = self.node();
        // Protocol feature negotiation is only legal once the virtio
        // PROTOCOL_FEATURES flag has been both offered and acked.
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
            return error_code(VhostUserError::InvalidOperation);
        }
        let hdr = node.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.protocol_features = val.value;
        // Should we support forward compatibility?
        // If so just mask out unrecognized flags instead of return errors.
        match VhostUserProtocolFeatures::from_bits(node.protocol_features) {
            Some(val) => Ok(val),
            None => error_code(VhostUserError::InvalidMessage),
        }
    }

    /// Ack protocol features with the slave and cache the acked set locally.
    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
        let mut node = self.node();
        // Same precondition as get_protocol_features(): PROTOCOL_FEATURES
        // must be offered and acked at the virtio feature level first.
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 || node.acked_virtio_features & flag == 0 {
            return error_code(VhostUserError::InvalidOperation);
        }
        let val = VhostUserU64::new(features.bits());
        let _ = node.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
        // Don't wait for ACK here because the protocol feature negotiation process hasn't been
        // completed yet.
        node.acked_protocol_features = features.bits();
        node.protocol_features_ready = true;
        Ok(())
    }

    /// Query how many queues the slave supports; requires the MQ protocol
    /// feature. Updates the cached `max_queue_num` on success.
    fn get_queue_num(&mut self) -> Result<u64> {
        let mut node = self.node();
        if !node.is_feature_mq_available() {
            return error_code(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        // Reject replies advertising more vrings than the protocol allows.
        if val.value > VHOST_USER_MAX_VRINGS {
            return error_code(VhostUserError::InvalidMessage);
        }
        node.max_queue_num = val.value;
        Ok(node.max_queue_num)
    }

    /// Enable or disable a vring; the 0/1 flag maps to the message payload.
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
        let mut node = self.node();
        // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
        if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
            return error_code(VhostUserError::InvalidOperation);
        } else if queue_index as u64 >= node.max_queue_num {
            return error_code(VhostUserError::InvalidParam);
        }

        let flag = if enable { 1 } else { 0 };
        let val = VhostUserVringState::new(queue_index as u32, flag);
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Fetch the device config space; requires the CONFIG protocol feature.
    /// The reply must echo the request's offset/size and match `buf`'s length.
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> {
        let body = VhostUserConfig::new(offset, size, flags);
        if !body.is_valid() {
            return error_code(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return error_code(VhostUserError::InvalidOperation);
        }

        // vhost-user spec states that:
        // "Master payload: virtio device config space"
        // "Slave payload: virtio device config space"
        let hdr = node.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
        let (body_reply, buf_reply, rfds) =
            node.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
        // No fds are expected in a config reply; a zero-sized reply signals a
        // slave-side failure; otherwise the echoed offset/size must match.
        if rfds.is_some() {
            Endpoint::<MasterReq>::close_rfds(rfds);
            return error_code(VhostUserError::InvalidMessage);
        } else if body_reply.size == 0 {
            return error_code(VhostUserError::SlaveInternalError);
        } else if body_reply.size != body.size
            || body_reply.size as usize != buf.len()
            || body_reply.offset != body.offset
        {
            return error_code(VhostUserError::InvalidMessage);
        }

        Ok((body_reply, buf_reply))
    }

    /// Write to the device config space; requires the CONFIG protocol feature.
    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> {
        // The payload must fit in a single vhost-user message.
        if buf.len() > MAX_MSG_SIZE {
            return error_code(VhostUserError::InvalidParam);
        }
        let body = VhostUserConfig::new(offset, buf.len() as u32, flags);
        if !body.is_valid() {
            return error_code(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return error_code(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Hand the slave an fd for its request channel; requires SLAVE_REQ.
    fn set_slave_request_fd(&mut self, fd: RawFd) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
            return error_code(VhostUserError::InvalidOperation);
        }

        let fds = [fd];
        node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
        Ok(())
    }

    /// Query the slave's memory-slot limit; requires CONFIGURE_MEM_SLOTS.
    fn get_max_mem_slots(&mut self) -> Result<u64> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return error_code(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;

        Ok(val.value)
    }

    /// Register one guest memory region with the slave, passing the region's
    /// mmap fd as ancillary data; requires CONFIGURE_MEM_SLOTS.
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return error_code(VhostUserError::InvalidOperation);
        }
        // The region must be non-empty and carry a valid mmap fd.
        if region.memory_size == 0 || region.mmap_handle < 0 {
            return error_code(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let fds = [region.mmap_handle];
        let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }

    /// Unregister one guest memory region; requires CONFIGURE_MEM_SLOTS.
    /// No fd accompanies a removal request.
    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return error_code(VhostUserError::InvalidOperation);
        }
        if region.memory_size == 0 {
            return error_code(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
        node.wait_for_ack(&hdr).map_err(|e| e.into())
    }
}
502
503 impl AsRawFd for Master {
as_raw_fd(&self) -> RawFd504 fn as_raw_fd(&self) -> RawFd {
505 let node = self.node();
506 node.main_sock.as_raw_fd()
507 }
508 }
509
/// Context object to pass guest memory configuration to VhostUserMaster::set_mem_table().
struct VhostUserMemoryContext {
    // Wire-format region descriptors, one per guest memory region.
    regions: VhostUserMemoryPayload,
    // mmap fds paired 1:1 with `regions`, sent as ancillary data.
    fds: Vec<RawFd>,
}
515
516 impl VhostUserMemoryContext {
517 /// Create a context object.
new() -> Self518 pub fn new() -> Self {
519 VhostUserMemoryContext {
520 regions: VhostUserMemoryPayload::new(),
521 fds: Vec::new(),
522 }
523 }
524
525 /// Append a user memory region and corresponding RawFd into the context object.
append(&mut self, region: &VhostUserMemoryRegion, fd: RawFd)526 pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawFd) {
527 self.regions.push(*region);
528 self.fds.push(fd);
529 }
530 }
531
struct MasterInternal {
    // Used to send requests to the slave.
    main_sock: Endpoint<MasterReq>,
    // Cached virtio features from the slave.
    virtio_features: u64,
    // Cached acked virtio features from the driver.
    acked_virtio_features: u64,
    // Cached vhost-user protocol features from the slave.
    protocol_features: u64,
    // Cached vhost-user protocol features acked by the driver.
    acked_protocol_features: u64,
    // Cached vhost-user protocol features are ready to use.
    protocol_features_ready: bool,
    // Cached maximum number of queues supported by the slave.
    max_queue_num: u64,
    // Internal flag to mark failure state; holds a raw OS error number.
    error: Option<i32>,
}
550
impl MasterInternal {
    /// Send a request that consists of a header only, optionally with fds
    /// attached as ancillary data. Returns the header for reply matching.
    fn send_request_header(
        &mut self,
        code: MasterReq,
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        self.check_state()?;
        let hdr = Self::new_request_header(code, 0);
        self.main_sock.send_header(&hdr, fds)?;
        Ok(hdr)
    }

    /// Send a request with a fixed-size body `msg`, optionally with fds.
    /// Fails if the body would exceed the maximum message size.
    fn send_request_with_body<T: Sized>(
        &mut self,
        code: MasterReq,
        msg: &T,
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if mem::size_of::<T>() > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let hdr = Self::new_request_header(code, mem::size_of::<T>() as u32);
        self.main_sock.send_message(&hdr, msg, fds)?;
        Ok(hdr)
    }

    /// Send a request with a fixed-size body followed by a variable-length
    /// payload. Validates total size and the attached fd count up front.
    fn send_request_with_payload<T: Sized>(
        &mut self,
        code: MasterReq,
        msg: &T,
        payload: &[u8],
        fds: Option<&[RawFd]>,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        let len = mem::size_of::<T>() + payload.len();
        if len > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        if let Some(ref fd_arr) = fds {
            if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
                return Err(VhostUserError::InvalidParam);
            }
        }
        self.check_state()?;

        let hdr = Self::new_request_header(code, len as u32);
        self.main_sock
            .send_message_with_payload(&hdr, msg, payload, fds)?;
        Ok(hdr)
    }

    /// Send a vring-related request (kick/call/err) carrying one fd.
    fn send_fd_for_vring(
        &mut self,
        code: MasterReq,
        queue_index: usize,
        fd: RawFd,
    ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
        if queue_index as u64 >= self.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
        // This flag is set when there is no file descriptor in the ancillary data. This signals
        // that polling will be used instead of waiting for the call.
        let msg = VhostUserU64::new(queue_index as u64);
        let hdr = Self::new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
        self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?;
        Ok(hdr)
    }

    /// Receive and validate a fixed-size reply body for a previously sent
    /// request header. Rejects replies with attached fds or invalid bodies.
    fn recv_reply<T: Sized + Default + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<T> {
        // `hdr` must be the original request header, not a reply header.
        if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let (reply, body, rfds) = self.main_sock.recv_body::<T>()?;
        if !reply.is_reply_for(&hdr) || rfds.is_some() || !body.is_valid() {
            // Close any unexpected fds so they do not leak.
            Endpoint::<MasterReq>::close_rfds(rfds);
            return Err(VhostUserError::InvalidMessage);
        }
        Ok(body)
    }

    /// Receive a reply that has a fixed-size body plus a trailing payload.
    /// The payload buffer is sized from the request header's size field.
    fn recv_reply_with_payload<T: Sized + Default + VhostUserMsgValidator>(
        &mut self,
        hdr: &VhostUserMsgHeader<MasterReq>,
    ) -> VhostUserResult<(T, Vec<u8>, Option<Vec<RawFd>>)> {
        // The request must have carried a payload (size > body) and must not
        // itself be a reply header.
        if mem::size_of::<T>() > MAX_MSG_SIZE
            || hdr.get_size() as usize <= mem::size_of::<T>()
            || hdr.get_size() as usize > MAX_MSG_SIZE
            || hdr.is_reply()
        {
            return Err(VhostUserError::InvalidParam);
        }
        self.check_state()?;

        let mut buf: Vec<u8> = vec![0; hdr.get_size() as usize - mem::size_of::<T>()];
        let (reply, body, bytes, rfds) = self.main_sock.recv_payload_into_buf::<T>(&mut buf)?;
        if !reply.is_reply_for(hdr)
            || reply.get_size() as usize != mem::size_of::<T>() + bytes
            || rfds.is_some()
            || !body.is_valid()
        {
            // Close any unexpected fds so they do not leak.
            Endpoint::<MasterReq>::close_rfds(rfds);
            return Err(VhostUserError::InvalidMessage);
        } else if bytes != buf.len() {
            // Short read: fewer payload bytes than the request advertised.
            return Err(VhostUserError::InvalidMessage);
        }
        Ok((body, buf, rfds))
    }

    /// Wait for the slave's REPLY_ACK to `hdr`, if and only if REPLY_ACK has
    /// been acked and the header requested a reply; otherwise a no-op.
    /// A non-zero ack value reports a slave-side failure.
    fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
        if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
            || !hdr.is_need_reply()
        {
            return Ok(());
        }
        self.check_state()?;

        let (reply, body, rfds) = self.main_sock.recv_body::<VhostUserU64>()?;
        if !reply.is_reply_for(&hdr) || rfds.is_some() || !body.is_valid() {
            Endpoint::<MasterReq>::close_rfds(rfds);
            return Err(VhostUserError::InvalidMessage);
        }
        if body.value != 0 {
            return Err(VhostUserError::SlaveInternalError);
        }
        Ok(())
    }

    /// Whether the MQ protocol feature has been acked.
    fn is_feature_mq_available(&self) -> bool {
        self.acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0
    }

    /// Fail fast if the connection has been marked broken, converting the
    /// recorded raw OS error number into a SocketBroken error.
    fn check_state(&self) -> VhostUserResult<()> {
        match self.error {
            Some(e) => Err(VhostUserError::SocketBroken(
                std::io::Error::from_raw_os_error(e),
            )),
            None => Ok(()),
        }
    }

    /// Build a request header with protocol version 0x1 and the given size.
    #[inline]
    fn new_request_header(request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
        // TODO: handle NEED_REPLY flag
        VhostUserMsgHeader::new(request, 0x1, size)
    }
}
706
707 #[cfg(test)]
708 mod tests {
709 use super::super::connection::Listener;
710 use super::*;
711 use tempfile::{Builder, TempDir};
712
temp_dir() -> TempDir713 fn temp_dir() -> TempDir {
714 Builder::new().prefix("/tmp/vhost_test").tempdir().unwrap()
715 }
716
create_pair<P: AsRef<Path>>(path: P) -> (Master, Endpoint<MasterReq>)717 fn create_pair<P: AsRef<Path>>(path: P) -> (Master, Endpoint<MasterReq>) {
718 let listener = Listener::new(&path, true).unwrap();
719 listener.set_nonblocking(true).unwrap();
720 let master = Master::connect(path, 2).unwrap();
721 let slave = listener.accept().unwrap().unwrap();
722 (master, Endpoint::from_stream(slave))
723 }
724
    #[test]
    // Verify that a master can connect and that back-to-back no-body
    // requests (SET_OWNER, RESET_OWNER) arrive in order with version 0x1,
    // zero size, and no attached fds.
    fn create_master() {
        let dir = temp_dir();
        let mut path = dir.path().to_owned();
        path.push("sock");
        let listener = Listener::new(&path, true).unwrap();
        listener.set_nonblocking(true).unwrap();

        let master = Master::connect(&path, 1).unwrap();
        let mut slave = Endpoint::<MasterReq>::from_stream(listener.accept().unwrap().unwrap());

        assert!(master.as_raw_fd() > 0);
        // Send two messages continuously
        master.set_owner().unwrap();
        master.reset_owner().unwrap();

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::RESET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());
    }
753
    #[test]
    // Verify listener/connect failure modes: a second non-forcing listener on
    // an occupied path fails, and connecting to a dead listener fails, while
    // a fresh forcing listener accepts a new master normally.
    fn test_create_failure() {
        let dir = temp_dir();
        let mut path = dir.path().to_owned();
        path.push("sock");
        let _ = Listener::new(&path, true).unwrap();
        let _ = Listener::new(&path, false).is_err();
        assert!(Master::connect(&path, 1).is_err());

        let listener = Listener::new(&path, true).unwrap();
        assert!(Listener::new(&path, false).is_err());
        listener.set_nonblocking(true).unwrap();

        let _master = Master::connect(&path, 1).unwrap();
        let _slave = listener.accept().unwrap().unwrap();
    }
770
    #[test]
    // Exercise GET/SET_FEATURES: a queued slave reply is returned verbatim by
    // get_features(), set_features() sends the expected u64 body, and a
    // wrongly-sized (u32) reply makes get_features() fail.
    fn test_features() {
        let dir = temp_dir();
        let mut path = dir.path().to_owned();
        path.push("sock");
        let (master, mut peer) = create_pair(&path);

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        // Queue the slave's reply before the master sends GET_FEATURES.
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, 0x15u64);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        master.set_features(0x15).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, 0x15);

        // A 4-byte body where an 8-byte VhostUserU64 is expected must fail.
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = 0x15u32;
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_features().is_err());
    }
807
    #[test]
    fn test_protocol_features() {
        // End-to-end exercise of the feature negotiation sequence:
        // SET_OWNER -> GET/SET_FEATURES -> GET/SET_PROTOCOL_FEATURES,
        // finishing with one malformed-reply failure case.
        let dir = temp_dir();
        let mut path = dir.path().to_owned();
        path.push("sock");
        let (mut master, mut peer) = create_pair(&path);

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert!(rfds.is_none());

        // Protocol-feature requests must fail until the PROTOCOL_FEATURES
        // virtio feature bit has been negotiated via GET/SET_FEATURES.
        assert!(master.get_protocol_features().is_err());
        assert!(master
            .set_protocol_features(VhostUserProtocolFeatures::all())
            .is_err());

        // Negotiate virtio features including the PROTOCOL_FEATURES bit.
        // The reply is queued on the peer before the (blocking) request.
        let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(vfeatures);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, vfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        // Ack the same feature set; the peer sees the value unchanged.
        master.set_features(vfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, vfeatures);

        // Protocol-feature negotiation should now succeed.
        let pfeatures = VhostUserProtocolFeatures::all();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_protocol_features().unwrap();
        assert_eq!(features, pfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_protocol_features(pfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, pfeatures.bits());

        // A reply carrying the wrong request code (SET_ instead of
        // GET_PROTOCOL_FEATURES) must be rejected by the master.
        let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_protocol_features().is_err());
    }
860
861 #[test]
test_master_set_config_negative()862 fn test_master_set_config_negative() {
863 let dir = temp_dir();
864 let mut path = dir.path().to_owned();
865 path.push("sock");
866 let (mut master, _peer) = create_pair(&path);
867 let buf = vec![0x0; MAX_MSG_SIZE + 1];
868
869 master
870 .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
871 .unwrap_err();
872
873 {
874 let mut node = master.node();
875 node.virtio_features = 0xffff_ffff;
876 node.acked_virtio_features = 0xffff_ffff;
877 node.protocol_features = 0xffff_ffff;
878 node.acked_protocol_features = 0xffff_ffff;
879 }
880
881 master
882 .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
883 .unwrap();
884 master
885 .set_config(0x0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
886 .unwrap_err();
887 master
888 .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
889 .unwrap_err();
890 master
891 .set_config(
892 0x100,
893 unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
894 &buf[0..4],
895 )
896 .unwrap_err();
897 master
898 .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf)
899 .unwrap_err();
900 master
901 .set_config(0x100, VhostUserConfigFlags::WRITABLE, &[])
902 .unwrap_err();
903 }
904
create_pair2() -> (Master, Endpoint<MasterReq>)905 fn create_pair2() -> (Master, Endpoint<MasterReq>) {
906 let dir = temp_dir();
907 let mut path = dir.path().to_owned();
908 path.push("sock");
909 let (master, peer) = create_pair(&path);
910
911 {
912 let mut node = master.node();
913 node.virtio_features = 0xffff_ffff;
914 node.acked_virtio_features = 0xffff_ffff;
915 node.protocol_features = 0xffff_ffff;
916 node.acked_protocol_features = 0xffff_ffff;
917 }
918
919 (master, peer)
920 }
921
922 #[test]
test_master_get_config_negative0()923 fn test_master_get_config_negative0() {
924 let (mut master, mut peer) = create_pair2();
925 let buf = vec![0x0; MAX_MSG_SIZE + 1];
926
927 let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
928 let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
929 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
930 .unwrap();
931 assert!(master
932 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
933 .is_ok());
934
935 hdr.set_code(MasterReq::GET_FEATURES);
936 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
937 .unwrap();
938 assert!(master
939 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
940 .is_err());
941 hdr.set_code(MasterReq::GET_CONFIG);
942 }
943
944 #[test]
test_master_get_config_negative1()945 fn test_master_get_config_negative1() {
946 let (mut master, mut peer) = create_pair2();
947 let buf = vec![0x0; MAX_MSG_SIZE + 1];
948
949 let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
950 let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
951 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
952 .unwrap();
953 assert!(master
954 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
955 .is_ok());
956
957 hdr.set_reply(false);
958 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
959 .unwrap();
960 assert!(master
961 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
962 .is_err());
963 }
964
    #[test]
    fn test_master_get_config_negative2() {
        // Sends a well-formed GET_CONFIG reply and verifies that the master
        // accepts it.
        //
        // NOTE(review): despite the "negative" name, this test only covers
        // the success path — no corrupted variant of the reply is sent
        // afterwards, unlike its sibling negative0..negative6 tests.
        // Presumably a negative case was intended here; confirm and extend.
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());
    }
978
979 #[test]
test_master_get_config_negative3()980 fn test_master_get_config_negative3() {
981 let (mut master, mut peer) = create_pair2();
982 let buf = vec![0x0; MAX_MSG_SIZE + 1];
983
984 let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
985 let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
986 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
987 .unwrap();
988 assert!(master
989 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
990 .is_ok());
991
992 msg.offset = 0;
993 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
994 .unwrap();
995 assert!(master
996 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
997 .is_err());
998 }
999
1000 #[test]
test_master_get_config_negative4()1001 fn test_master_get_config_negative4() {
1002 let (mut master, mut peer) = create_pair2();
1003 let buf = vec![0x0; MAX_MSG_SIZE + 1];
1004
1005 let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
1006 let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
1007 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
1008 .unwrap();
1009 assert!(master
1010 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1011 .is_ok());
1012
1013 msg.offset = 0x101;
1014 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
1015 .unwrap();
1016 assert!(master
1017 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1018 .is_err());
1019 }
1020
1021 #[test]
test_master_get_config_negative5()1022 fn test_master_get_config_negative5() {
1023 let (mut master, mut peer) = create_pair2();
1024 let buf = vec![0x0; MAX_MSG_SIZE + 1];
1025
1026 let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
1027 let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
1028 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
1029 .unwrap();
1030 assert!(master
1031 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1032 .is_ok());
1033
1034 msg.offset = (MAX_MSG_SIZE + 1) as u32;
1035 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
1036 .unwrap();
1037 assert!(master
1038 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1039 .is_err());
1040 }
1041
1042 #[test]
test_master_get_config_negative6()1043 fn test_master_get_config_negative6() {
1044 let (mut master, mut peer) = create_pair2();
1045 let buf = vec![0x0; MAX_MSG_SIZE + 1];
1046
1047 let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
1048 let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
1049 peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
1050 .unwrap();
1051 assert!(master
1052 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1053 .is_ok());
1054
1055 msg.size = 6;
1056 peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
1057 .unwrap();
1058 assert!(master
1059 .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
1060 .is_err());
1061 }
1062
1063 #[test]
test_maset_set_mem_table_failure()1064 fn test_maset_set_mem_table_failure() {
1065 let (master, _peer) = create_pair2();
1066
1067 master.set_mem_table(&[]).unwrap_err();
1068 let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
1069 master.set_mem_table(&tables).unwrap_err();
1070 }
1071 }
1072