• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 
4 //! Traits and Struct for vhost-user master.
5 
6 use std::fs::File;
7 use std::mem;
8 use std::path::Path;
9 use std::sync::{Arc, Mutex, MutexGuard};
10 
11 use base::{AsRawDescriptor, Event, RawDescriptor, INVALID_DESCRIPTOR};
12 use data_model::DataInit;
13 
14 use super::connection::{Endpoint, EndpointExt};
15 use super::message::*;
16 use super::{take_single_file, Error as VhostUserError, Result as VhostUserResult};
17 use crate::backend::{VhostBackend, VhostUserMemoryRegionInfo, VringConfigData};
18 use crate::{Result, SystemStream};
19 
/// Trait for vhost-user master to provide extra methods not covered by the VhostBackend yet.
pub trait VhostUserMaster: VhostBackend {
    /// Get the protocol feature bitmask from the underlying vhost implementation.
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures>;

    /// Enable protocol features in the underlying vhost implementation.
    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()>;

    /// Query how many queues the backend supports.
    fn get_queue_num(&mut self) -> Result<u64>;

    /// Signal slave to enable or disable corresponding vring.
    ///
    /// Slave must not pass data to/from the backend until ring is enabled by
    /// VHOST_USER_SET_VRING_ENABLE with parameter 1, or after it has been
    /// disabled by VHOST_USER_SET_VRING_ENABLE with parameter 0.
    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()>;

    /// Fetch the contents of the virtio device configuration space.
    ///
    /// `buf` carries the master-side payload; the reply must echo the same
    /// `offset` and `size` back or the call fails with an invalid-message error.
    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)>;

    /// Change the virtio device configuration space. It also can be used for live migration on the
    /// destination host to set readonly configuration space fields.
    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()>;

    /// Setup slave communication channel.
    fn set_slave_request_fd(&mut self, fd: &dyn AsRawDescriptor) -> Result<()>;

    /// Retrieve shared buffer for inflight I/O tracking.
    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)>;

    /// Set shared buffer for inflight I/O tracking.
    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawDescriptor) -> Result<()>;

    /// Query the maximum amount of memory slots supported by the backend.
    fn get_max_mem_slots(&mut self) -> Result<u64>;

    /// Add a new guest memory mapping for vhost to use.
    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;

    /// Remove a guest memory mapping from vhost.
    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()>;
}
72 
/// Struct for the vhost-user master endpoint.
#[derive(Clone)]
pub struct Master<E: Endpoint<MasterReq>> {
    // Shared internal state; cloned `Master`s talk over the same socket.
    node: Arc<Mutex<MasterInternal<E>>>,
}
78 
79 impl<E: Endpoint<MasterReq> + From<SystemStream>> Master<E> {
80     /// Create a new instance from a Unix stream socket.
from_stream(sock: SystemStream, max_queue_num: u64) -> Self81     pub fn from_stream(sock: SystemStream, max_queue_num: u64) -> Self {
82         Self::new(E::from(sock), max_queue_num)
83     }
84 }
85 
86 impl<E: Endpoint<MasterReq>> Master<E> {
87     /// Create a new instance.
new(ep: E, max_queue_num: u64) -> Self88     fn new(ep: E, max_queue_num: u64) -> Self {
89         Master {
90             node: Arc::new(Mutex::new(MasterInternal {
91                 main_sock: ep,
92                 virtio_features: 0,
93                 acked_virtio_features: 0,
94                 protocol_features: 0,
95                 acked_protocol_features: 0,
96                 protocol_features_ready: false,
97                 max_queue_num,
98                 error: None,
99                 hdr_flags: VhostUserHeaderFlag::empty(),
100             })),
101         }
102     }
103 
node(&self) -> MutexGuard<MasterInternal<E>>104     fn node(&self) -> MutexGuard<MasterInternal<E>> {
105         self.node.lock().unwrap()
106     }
107 
108     /// Create a new vhost-user master endpoint.
109     ///
110     /// Will retry as the backend may not be ready to accept the connection.
111     ///
112     /// # Arguments
113     /// * `path` - path of Unix domain socket listener to connect to
connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self>114     pub fn connect<P: AsRef<Path>>(path: P, max_queue_num: u64) -> Result<Self> {
115         let mut retry_count = 5;
116         let endpoint = loop {
117             match E::connect(&path) {
118                 Ok(endpoint) => break Ok(endpoint),
119                 Err(e) => match &e {
120                     VhostUserError::SocketConnect(why) => {
121                         if why.kind() == std::io::ErrorKind::ConnectionRefused && retry_count > 0 {
122                             std::thread::sleep(std::time::Duration::from_millis(100));
123                             retry_count -= 1;
124                             continue;
125                         } else {
126                             break Err(e);
127                         }
128                     }
129                     _ => break Err(e),
130                 },
131             }
132         }?;
133 
134         Ok(Self::new(endpoint, max_queue_num))
135     }
136 
137     /// Set the header flags that should be applied to all following messages.
set_hdr_flags(&self, flags: VhostUserHeaderFlag)138     pub fn set_hdr_flags(&self, flags: VhostUserHeaderFlag) {
139         let mut node = self.node();
140         node.hdr_flags = flags;
141     }
142 }
143 
144 impl<E: Endpoint<MasterReq>> VhostBackend for Master<E> {
145     /// Get from the underlying vhost implementation the feature bitmask.
get_features(&self) -> Result<u64>146     fn get_features(&self) -> Result<u64> {
147         let mut node = self.node();
148         let hdr = node.send_request_header(MasterReq::GET_FEATURES, None)?;
149         let val = node.recv_reply::<VhostUserU64>(&hdr)?;
150         node.virtio_features = val.value;
151         Ok(node.virtio_features)
152     }
153 
154     /// Enable features in the underlying vhost implementation using a bitmask.
set_features(&self, features: u64) -> Result<()>155     fn set_features(&self, features: u64) -> Result<()> {
156         let mut node = self.node();
157         let val = VhostUserU64::new(features);
158         let hdr = node.send_request_with_body(MasterReq::SET_FEATURES, &val, None)?;
159         node.acked_virtio_features = features & node.virtio_features;
160         node.wait_for_ack(&hdr)
161     }
162 
163     /// Set the current Master as an owner of the session.
set_owner(&self) -> Result<()>164     fn set_owner(&self) -> Result<()> {
165         // We unwrap() the return value to assert that we are not expecting threads to ever fail
166         // while holding the lock.
167         let mut node = self.node();
168         let hdr = node.send_request_header(MasterReq::SET_OWNER, None)?;
169         node.wait_for_ack(&hdr)
170     }
171 
reset_owner(&self) -> Result<()>172     fn reset_owner(&self) -> Result<()> {
173         let mut node = self.node();
174         let hdr = node.send_request_header(MasterReq::RESET_OWNER, None)?;
175         node.wait_for_ack(&hdr)
176     }
177 
178     /// Set the memory map regions on the slave so it can translate the vring
179     /// addresses. In the ancillary data there is an array of file descriptors
set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()>180     fn set_mem_table(&self, regions: &[VhostUserMemoryRegionInfo]) -> Result<()> {
181         if regions.is_empty() || regions.len() > MAX_ATTACHED_FD_ENTRIES {
182             return Err(VhostUserError::InvalidParam);
183         }
184 
185         let mut ctx = VhostUserMemoryContext::new();
186         for region in regions.iter() {
187             // TODO(b/221882601): once mmap handle cross platform story exists, update this null
188             // check.
189             if region.memory_size == 0 || (region.mmap_handle as isize) < 0 {
190                 return Err(VhostUserError::InvalidParam);
191             }
192             let reg = VhostUserMemoryRegion {
193                 guest_phys_addr: region.guest_phys_addr,
194                 memory_size: region.memory_size,
195                 user_addr: region.userspace_addr,
196                 mmap_offset: region.mmap_offset,
197             };
198             ctx.append(&reg, region.mmap_handle);
199         }
200 
201         let mut node = self.node();
202         let body = VhostUserMemory::new(ctx.regions.len() as u32);
203         let (_, payload, _) = unsafe { ctx.regions.align_to::<u8>() };
204         let hdr = node.send_request_with_payload(
205             MasterReq::SET_MEM_TABLE,
206             &body,
207             payload,
208             Some(ctx.fds.as_slice()),
209         )?;
210         node.wait_for_ack(&hdr)
211     }
212 
213     // Clippy doesn't seem to know that if let with && is still experimental
214     #[allow(clippy::unnecessary_unwrap)]
set_log_base(&self, base: u64, fd: Option<RawDescriptor>) -> Result<()>215     fn set_log_base(&self, base: u64, fd: Option<RawDescriptor>) -> Result<()> {
216         let mut node = self.node();
217         let val = VhostUserU64::new(base);
218 
219         if node.acked_protocol_features & VhostUserProtocolFeatures::LOG_SHMFD.bits() != 0
220             && fd.is_some()
221         {
222             let fds = [fd.unwrap()];
223             let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, Some(&fds))?;
224         } else {
225             let _ = node.send_request_with_body(MasterReq::SET_LOG_BASE, &val, None)?;
226         }
227         Ok(())
228     }
229 
set_log_fd(&self, fd: RawDescriptor) -> Result<()>230     fn set_log_fd(&self, fd: RawDescriptor) -> Result<()> {
231         let mut node = self.node();
232         let fds = [fd];
233         let hdr = node.send_request_header(MasterReq::SET_LOG_FD, Some(&fds))?;
234         node.wait_for_ack(&hdr)
235     }
236 
237     /// Set the size of the queue.
set_vring_num(&self, queue_index: usize, num: u16) -> Result<()>238     fn set_vring_num(&self, queue_index: usize, num: u16) -> Result<()> {
239         let mut node = self.node();
240         if queue_index as u64 >= node.max_queue_num {
241             return Err(VhostUserError::InvalidParam);
242         }
243 
244         let val = VhostUserVringState::new(queue_index as u32, num.into());
245         let hdr = node.send_request_with_body(MasterReq::SET_VRING_NUM, &val, None)?;
246         node.wait_for_ack(&hdr)
247     }
248 
249     /// Sets the addresses of the different aspects of the vring.
set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()>250     fn set_vring_addr(&self, queue_index: usize, config_data: &VringConfigData) -> Result<()> {
251         let mut node = self.node();
252         if queue_index as u64 >= node.max_queue_num
253             || config_data.flags & !(VhostUserVringAddrFlags::all().bits()) != 0
254         {
255             return Err(VhostUserError::InvalidParam);
256         }
257 
258         let val = VhostUserVringAddr::from_config_data(queue_index as u32, config_data);
259         let hdr = node.send_request_with_body(MasterReq::SET_VRING_ADDR, &val, None)?;
260         node.wait_for_ack(&hdr)
261     }
262 
263     /// Sets the base offset in the available vring.
set_vring_base(&self, queue_index: usize, base: u16) -> Result<()>264     fn set_vring_base(&self, queue_index: usize, base: u16) -> Result<()> {
265         let mut node = self.node();
266         if queue_index as u64 >= node.max_queue_num {
267             return Err(VhostUserError::InvalidParam);
268         }
269 
270         let val = VhostUserVringState::new(queue_index as u32, base.into());
271         let hdr = node.send_request_with_body(MasterReq::SET_VRING_BASE, &val, None)?;
272         node.wait_for_ack(&hdr)
273     }
274 
get_vring_base(&self, queue_index: usize) -> Result<u32>275     fn get_vring_base(&self, queue_index: usize) -> Result<u32> {
276         let mut node = self.node();
277         if queue_index as u64 >= node.max_queue_num {
278             return Err(VhostUserError::InvalidParam);
279         }
280 
281         let req = VhostUserVringState::new(queue_index as u32, 0);
282         let hdr = node.send_request_with_body(MasterReq::GET_VRING_BASE, &req, None)?;
283         let reply = node.recv_reply::<VhostUserVringState>(&hdr)?;
284         Ok(reply.num)
285     }
286 
287     /// Set the event file descriptor to signal when buffers are used.
288     /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
289     /// is set when there is no file descriptor in the ancillary data. This signals that polling
290     /// will be used instead of waiting for the call.
set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()>291     fn set_vring_call(&self, queue_index: usize, event: &Event) -> Result<()> {
292         let mut node = self.node();
293         if queue_index as u64 >= node.max_queue_num {
294             return Err(VhostUserError::InvalidParam);
295         }
296         let hdr = node.send_fd_for_vring(
297             MasterReq::SET_VRING_CALL,
298             queue_index,
299             event.as_raw_descriptor(),
300         )?;
301         node.wait_for_ack(&hdr)
302     }
303 
304     /// Set the event file descriptor for adding buffers to the vring.
305     /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
306     /// is set when there is no file descriptor in the ancillary data. This signals that polling
307     /// should be used instead of waiting for a kick.
set_vring_kick(&self, queue_index: usize, event: &Event) -> Result<()>308     fn set_vring_kick(&self, queue_index: usize, event: &Event) -> Result<()> {
309         let mut node = self.node();
310         if queue_index as u64 >= node.max_queue_num {
311             return Err(VhostUserError::InvalidParam);
312         }
313         let hdr = node.send_fd_for_vring(
314             MasterReq::SET_VRING_KICK,
315             queue_index,
316             event.as_raw_descriptor(),
317         )?;
318         node.wait_for_ack(&hdr)
319     }
320 
321     /// Set the event file descriptor to signal when error occurs.
322     /// Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag. This flag
323     /// is set when there is no file descriptor in the ancillary data.
set_vring_err(&self, queue_index: usize, event: &Event) -> Result<()>324     fn set_vring_err(&self, queue_index: usize, event: &Event) -> Result<()> {
325         let mut node = self.node();
326         if queue_index as u64 >= node.max_queue_num {
327             return Err(VhostUserError::InvalidParam);
328         }
329         let hdr = node.send_fd_for_vring(
330             MasterReq::SET_VRING_ERR,
331             queue_index,
332             event.as_raw_descriptor(),
333         )?;
334         node.wait_for_ack(&hdr)
335     }
336 }
337 
impl<E: Endpoint<MasterReq>> VhostUserMaster for Master<E> {
    fn get_protocol_features(&mut self) -> Result<VhostUserProtocolFeatures> {
        let mut node = self.node();
        // GET_PROTOCOL_FEATURES is only valid after the slave advertised the
        // PROTOCOL_FEATURES virtio feature via get_features().
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let hdr = node.send_request_header(MasterReq::GET_PROTOCOL_FEATURES, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        node.protocol_features = val.value;
        // Should we support forward compatibility?
        // If so just mask out unrecognized flags instead of return errors.
        match VhostUserProtocolFeatures::from_bits(node.protocol_features) {
            Some(val) => Ok(val),
            None => Err(VhostUserError::InvalidMessage),
        }
    }

    fn set_protocol_features(&mut self, features: VhostUserProtocolFeatures) -> Result<()> {
        let mut node = self.node();
        let flag = VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        if node.virtio_features & flag == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let val = VhostUserU64::new(features.bits());
        let hdr = node.send_request_with_body(MasterReq::SET_PROTOCOL_FEATURES, &val, None)?;
        // NOTE(review): an earlier comment claimed we must not wait for an ACK
        // because protocol feature negotiation is still in progress, yet
        // wait_for_ack() is called below — presumably it is a no-op unless the
        // reply-ack header flag is set on `hdr`; confirm against wait_for_ack().
        node.acked_protocol_features = features.bits();
        node.protocol_features_ready = true;
        node.wait_for_ack(&hdr)
    }

    fn get_queue_num(&mut self) -> Result<u64> {
        let mut node = self.node();
        if !node.is_feature_mq_available() {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_QUEUE_NUM, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;
        // Clamp-check the slave's answer against the protocol maximum.
        if val.value > VHOST_USER_MAX_VRINGS {
            return Err(VhostUserError::InvalidMessage);
        }
        node.max_queue_num = val.value;
        Ok(node.max_queue_num)
    }

    fn set_vring_enable(&mut self, queue_index: usize, enable: bool) -> Result<()> {
        let mut node = self.node();
        // set_vring_enable() is supported only when PROTOCOL_FEATURES has been enabled.
        if node.acked_virtio_features & VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        } else if queue_index as u64 >= node.max_queue_num {
            return Err(VhostUserError::InvalidParam);
        }

        let flag = if enable { 1 } else { 0 };
        let val = VhostUserVringState::new(queue_index as u32, flag);
        let hdr = node.send_request_with_body(MasterReq::SET_VRING_ENABLE, &val, None)?;
        node.wait_for_ack(&hdr)
    }

    fn get_config(
        &mut self,
        offset: u32,
        size: u32,
        flags: VhostUserConfigFlags,
        buf: &[u8],
    ) -> Result<(VhostUserConfig, VhostUserConfigPayload)> {
        let body = VhostUserConfig::new(offset, size, flags);
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        // vhost-user spec states that:
        // "Master payload: virtio device config space"
        // "Slave payload: virtio device config space"
        let hdr = node.send_request_with_payload(MasterReq::GET_CONFIG, &body, buf, None)?;
        let (body_reply, buf_reply, rfds) =
            node.recv_reply_with_payload::<VhostUserConfig>(&hdr)?;
        // No descriptors are expected; a zero-size reply signals slave failure;
        // the echoed offset/size must match what we asked for.
        if rfds.is_some() {
            return Err(VhostUserError::InvalidMessage);
        } else if body_reply.size == 0 {
            return Err(VhostUserError::SlaveInternalError);
        } else if body_reply.size != body.size
            || body_reply.size as usize != buf.len()
            || body_reply.offset != body.offset
        {
            return Err(VhostUserError::InvalidMessage);
        }

        Ok((body_reply, buf_reply))
    }

    fn set_config(&mut self, offset: u32, flags: VhostUserConfigFlags, buf: &[u8]) -> Result<()> {
        if buf.len() > MAX_MSG_SIZE {
            return Err(VhostUserError::InvalidParam);
        }
        let body = VhostUserConfig::new(offset, buf.len() as u32, flags);
        if !body.is_valid() {
            return Err(VhostUserError::InvalidParam);
        }

        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIG
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIG.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_with_payload(MasterReq::SET_CONFIG, &body, buf, None)?;
        node.wait_for_ack(&hdr)
    }

    fn set_slave_request_fd(&mut self, fd: &dyn AsRawDescriptor) -> Result<()> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::SLAVE_REQ
        if node.acked_protocol_features & VhostUserProtocolFeatures::SLAVE_REQ.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }
        let fds = [fd.as_raw_descriptor()];
        let hdr = node.send_request_header(MasterReq::SET_SLAVE_REQ_FD, Some(&fds))?;
        node.wait_for_ack(&hdr)
    }

    fn get_inflight_fd(
        &mut self,
        inflight: &VhostUserInflight,
    ) -> Result<(VhostUserInflight, File)> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::INFLIGHT_SHMFD
        if node.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_with_body(MasterReq::GET_INFLIGHT_FD, inflight, None)?;
        let (inflight, files) = node.recv_reply_with_files::<VhostUserInflight>(&hdr)?;

        // Exactly one descriptor (the shared buffer) must accompany the reply.
        match take_single_file(files) {
            Some(file) => Ok((inflight, file)),
            None => Err(VhostUserError::IncorrectFds),
        }
    }

    fn set_inflight_fd(&mut self, inflight: &VhostUserInflight, fd: RawDescriptor) -> Result<()> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::INFLIGHT_SHMFD
        if node.acked_protocol_features & VhostUserProtocolFeatures::INFLIGHT_SHMFD.bits() == 0 {
            return Err(VhostUserError::InvalidOperation);
        }

        // A zero-sized mapping, empty queue layout, or invalid descriptor
        // cannot describe a usable inflight buffer.
        if inflight.mmap_size == 0
            || inflight.num_queues == 0
            || inflight.queue_size == 0
            || fd == INVALID_DESCRIPTOR
        {
            return Err(VhostUserError::InvalidParam);
        }

        let hdr = node.send_request_with_body(MasterReq::SET_INFLIGHT_FD, inflight, Some(&[fd]))?;
        node.wait_for_ack(&hdr)
    }

    fn get_max_mem_slots(&mut self) -> Result<u64> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }

        let hdr = node.send_request_header(MasterReq::GET_MAX_MEM_SLOTS, None)?;
        let val = node.recv_reply::<VhostUserU64>(&hdr)?;

        Ok(val.value)
    }

    fn add_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }
        // TODO(b/221882601): once mmap handle cross platform story exists, update this null check.
        if region.memory_size == 0 || (region.mmap_handle as isize) < 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        let fds = [region.mmap_handle];
        let hdr = node.send_request_with_body(MasterReq::ADD_MEM_REG, &body, Some(&fds))?;
        node.wait_for_ack(&hdr)
    }

    fn remove_mem_region(&mut self, region: &VhostUserMemoryRegionInfo) -> Result<()> {
        let mut node = self.node();
        // depends on VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS
        if node.acked_protocol_features & VhostUserProtocolFeatures::CONFIGURE_MEM_SLOTS.bits() == 0
        {
            return Err(VhostUserError::InvalidOperation);
        }
        if region.memory_size == 0 {
            return Err(VhostUserError::InvalidParam);
        }

        let body = VhostUserSingleMemoryRegion::new(
            region.guest_phys_addr,
            region.memory_size,
            region.userspace_addr,
            region.mmap_offset,
        );
        // Removal is keyed by the region description; no descriptor attached.
        let hdr = node.send_request_with_body(MasterReq::REM_MEM_REG, &body, None)?;
        node.wait_for_ack(&hdr)
    }
}
559 
impl<E: Endpoint<MasterReq> + AsRawDescriptor> AsRawDescriptor for Master<E> {
    /// Expose the raw descriptor of the underlying master socket.
    fn as_raw_descriptor(&self) -> RawDescriptor {
        let node = self.node();
        // TODO(b/221882601): why is this here? The underlying Tube needs to use a read notifier
        // if this is for polling.
        node.main_sock.as_raw_descriptor()
    }
}
568 
// TODO(b/221882601): likely need pairs of RDs and/or SharedMemory to represent mmaps on Windows.
/// Context object to pass guest memory configuration to VhostUserMaster::set_mem_table().
struct VhostUserMemoryContext {
    // Wire-format region descriptors, parallel to `fds` (same index order).
    regions: VhostUserMemoryPayload,
    // One mmap descriptor per entry in `regions`.
    fds: Vec<RawDescriptor>,
}
575 
576 impl VhostUserMemoryContext {
577     /// Create a context object.
new() -> Self578     pub fn new() -> Self {
579         VhostUserMemoryContext {
580             regions: VhostUserMemoryPayload::new(),
581             fds: Vec::new(),
582         }
583     }
584 
585     /// Append a user memory region and corresponding RawDescriptor into the context object.
append(&mut self, region: &VhostUserMemoryRegion, fd: RawDescriptor)586     pub fn append(&mut self, region: &VhostUserMemoryRegion, fd: RawDescriptor) {
587         self.regions.push(*region);
588         self.fds.push(fd);
589     }
590 }
591 
struct MasterInternal<E: Endpoint<MasterReq>> {
    // Used to send requests to the slave.
    main_sock: E,
    // Cached virtio features from the slave.
    virtio_features: u64,
    // Cached acked virtio features from the driver.
    acked_virtio_features: u64,
    // Cached vhost-user protocol features from the slave.
    protocol_features: u64,
    // Cached vhost-user protocol features acked by the master.
    acked_protocol_features: u64,
    // Cached vhost-user protocol features are ready to use.
    protocol_features_ready: bool,
    // Cached maximum number of queues supported from the slave.
    max_queue_num: u64,
    // Internal flag to mark failure state.
    error: Option<i32>,
    // List of header flags.
    hdr_flags: VhostUserHeaderFlag,
}
612 
613 impl<E: Endpoint<MasterReq>> MasterInternal<E> {
send_request_header( &mut self, code: MasterReq, fds: Option<&[RawDescriptor]>, ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>>614     fn send_request_header(
615         &mut self,
616         code: MasterReq,
617         fds: Option<&[RawDescriptor]>,
618     ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
619         self.check_state()?;
620         let hdr = self.new_request_header(code, 0);
621         self.main_sock.send_header(&hdr, fds)?;
622         Ok(hdr)
623     }
624 
send_request_with_body<T: Sized + DataInit>( &mut self, code: MasterReq, msg: &T, fds: Option<&[RawDescriptor]>, ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>>625     fn send_request_with_body<T: Sized + DataInit>(
626         &mut self,
627         code: MasterReq,
628         msg: &T,
629         fds: Option<&[RawDescriptor]>,
630     ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
631         if mem::size_of::<T>() > MAX_MSG_SIZE {
632             return Err(VhostUserError::InvalidParam);
633         }
634         self.check_state()?;
635 
636         let hdr = self.new_request_header(code, mem::size_of::<T>() as u32);
637         self.main_sock.send_message(&hdr, msg, fds)?;
638         Ok(hdr)
639     }
640 
send_request_with_payload<T: Sized + DataInit>( &mut self, code: MasterReq, msg: &T, payload: &[u8], fds: Option<&[RawDescriptor]>, ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>>641     fn send_request_with_payload<T: Sized + DataInit>(
642         &mut self,
643         code: MasterReq,
644         msg: &T,
645         payload: &[u8],
646         fds: Option<&[RawDescriptor]>,
647     ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
648         let len = mem::size_of::<T>() + payload.len();
649         if len > MAX_MSG_SIZE {
650             return Err(VhostUserError::InvalidParam);
651         }
652         if let Some(fd_arr) = fds {
653             if fd_arr.len() > MAX_ATTACHED_FD_ENTRIES {
654                 return Err(VhostUserError::InvalidParam);
655             }
656         }
657         self.check_state()?;
658 
659         let hdr = self.new_request_header(code, len as u32);
660         self.main_sock
661             .send_message_with_payload(&hdr, msg, payload, fds)?;
662         Ok(hdr)
663     }
664 
send_fd_for_vring( &mut self, code: MasterReq, queue_index: usize, fd: RawDescriptor, ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>>665     fn send_fd_for_vring(
666         &mut self,
667         code: MasterReq,
668         queue_index: usize,
669         fd: RawDescriptor,
670     ) -> VhostUserResult<VhostUserMsgHeader<MasterReq>> {
671         if queue_index as u64 >= self.max_queue_num {
672             return Err(VhostUserError::InvalidParam);
673         }
674         self.check_state()?;
675 
676         // Bits (0-7) of the payload contain the vring index. Bit 8 is the invalid FD flag.
677         // This flag is set when there is no file descriptor in the ancillary data. This signals
678         // that polling will be used instead of waiting for the call.
679         let msg = VhostUserU64::new(queue_index as u64);
680         let hdr = self.new_request_header(code, mem::size_of::<VhostUserU64>() as u32);
681         self.main_sock.send_message(&hdr, &msg, Some(&[fd]))?;
682         Ok(hdr)
683     }
684 
recv_reply<T: Sized + DataInit + Default + VhostUserMsgValidator>( &mut self, hdr: &VhostUserMsgHeader<MasterReq>, ) -> VhostUserResult<T>685     fn recv_reply<T: Sized + DataInit + Default + VhostUserMsgValidator>(
686         &mut self,
687         hdr: &VhostUserMsgHeader<MasterReq>,
688     ) -> VhostUserResult<T> {
689         if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
690             return Err(VhostUserError::InvalidParam);
691         }
692         self.check_state()?;
693 
694         let (reply, body, rfds) = self.main_sock.recv_body::<T>()?;
695         if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
696             return Err(VhostUserError::InvalidMessage);
697         }
698         Ok(body)
699     }
700 
recv_reply_with_files<T: Sized + DataInit + Default + VhostUserMsgValidator>( &mut self, hdr: &VhostUserMsgHeader<MasterReq>, ) -> VhostUserResult<(T, Option<Vec<File>>)>701     fn recv_reply_with_files<T: Sized + DataInit + Default + VhostUserMsgValidator>(
702         &mut self,
703         hdr: &VhostUserMsgHeader<MasterReq>,
704     ) -> VhostUserResult<(T, Option<Vec<File>>)> {
705         if mem::size_of::<T>() > MAX_MSG_SIZE || hdr.is_reply() {
706             return Err(VhostUserError::InvalidParam);
707         }
708         self.check_state()?;
709 
710         let (reply, body, files) = self.main_sock.recv_body::<T>()?;
711         if !reply.is_reply_for(hdr) || files.is_none() || !body.is_valid() {
712             return Err(VhostUserError::InvalidMessage);
713         }
714         Ok((body, files))
715     }
716 
recv_reply_with_payload<T: Sized + DataInit + Default + VhostUserMsgValidator>( &mut self, hdr: &VhostUserMsgHeader<MasterReq>, ) -> VhostUserResult<(T, Vec<u8>, Option<Vec<File>>)>717     fn recv_reply_with_payload<T: Sized + DataInit + Default + VhostUserMsgValidator>(
718         &mut self,
719         hdr: &VhostUserMsgHeader<MasterReq>,
720     ) -> VhostUserResult<(T, Vec<u8>, Option<Vec<File>>)> {
721         if mem::size_of::<T>() > MAX_MSG_SIZE
722             || hdr.get_size() as usize <= mem::size_of::<T>()
723             || hdr.get_size() as usize > MAX_MSG_SIZE
724             || hdr.is_reply()
725         {
726             return Err(VhostUserError::InvalidParam);
727         }
728         self.check_state()?;
729 
730         let mut buf: Vec<u8> = vec![0; hdr.get_size() as usize - mem::size_of::<T>()];
731         let (reply, body, bytes, files) = self.main_sock.recv_payload_into_buf::<T>(&mut buf)?;
732         if !reply.is_reply_for(hdr)
733             || reply.get_size() as usize != mem::size_of::<T>() + bytes
734             || files.is_some()
735             || !body.is_valid()
736             || bytes != buf.len()
737         {
738             return Err(VhostUserError::InvalidMessage);
739         }
740 
741         Ok((body, buf, files))
742     }
743 
wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()>744     fn wait_for_ack(&mut self, hdr: &VhostUserMsgHeader<MasterReq>) -> VhostUserResult<()> {
745         if self.acked_protocol_features & VhostUserProtocolFeatures::REPLY_ACK.bits() == 0
746             || !hdr.is_need_reply()
747         {
748             return Ok(());
749         }
750         self.check_state()?;
751 
752         let (reply, body, rfds) = self.main_sock.recv_body::<VhostUserU64>()?;
753         if !reply.is_reply_for(hdr) || rfds.is_some() || !body.is_valid() {
754             return Err(VhostUserError::InvalidMessage);
755         }
756         if body.value != 0 {
757             return Err(VhostUserError::SlaveInternalError);
758         }
759         Ok(())
760     }
761 
is_feature_mq_available(&self) -> bool762     fn is_feature_mq_available(&self) -> bool {
763         self.acked_protocol_features & VhostUserProtocolFeatures::MQ.bits() != 0
764     }
765 
check_state(&self) -> VhostUserResult<()>766     fn check_state(&self) -> VhostUserResult<()> {
767         match self.error {
768             Some(e) => Err(VhostUserError::SocketBroken(
769                 std::io::Error::from_raw_os_error(e),
770             )),
771             None => Ok(()),
772         }
773     }
774 
775     #[inline]
new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq>776     fn new_request_header(&self, request: MasterReq, size: u32) -> VhostUserMsgHeader<MasterReq> {
777         VhostUserMsgHeader::new(request, self.hdr_flags.bits() | 0x1, size)
778     }
779 }
780 
#[cfg(test)]
mod tests {
    use super::*;
    use crate::connection::tests::{create_pair, TestEndpoint, TestMaster};
    use base::INVALID_DESCRIPTOR;

    // A new master holds a valid descriptor and can queue several requests
    // back to back; the peer observes them in order with version bit 0x1 set.
    #[test]
    fn create_master() {
        let (master, mut slave) = create_pair();

        assert!(master.as_raw_descriptor() != INVALID_DESCRIPTOR);
        // Send two messages continuously
        master.set_owner().unwrap();
        master.reset_owner().unwrap();

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        let (hdr, rfds) = slave.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::RESET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());
    }

    // GET/SET_FEATURES round-trips; a reply with a wrong-sized body (u32
    // instead of u64) must make get_features() fail.
    #[test]
    fn test_features() {
        let (master, mut peer) = create_pair();

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert_eq!(hdr.get_size(), 0);
        assert_eq!(hdr.get_version(), 0x1);
        assert!(rfds.is_none());

        // Queue the reply first: get_features() reads it synchronously.
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, 0x15u64);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        let hdr = VhostUserMsgHeader::new(MasterReq::SET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(0x15);
        peer.send_message(&hdr, &msg, None).unwrap();
        master.set_features(0x15).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, 0x15);

        // A u32 body does not match the expected VhostUserU64 reply.
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = 0x15u32;
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_features().is_err());
    }

    // Protocol-feature negotiation: queries fail before PROTOCOL_FEATURES is
    // negotiated, succeed afterwards, and fail again on a mismatched reply code.
    #[test]
    fn test_protocol_features() {
        let (mut master, mut peer) = create_pair();

        master.set_owner().unwrap();
        let (hdr, rfds) = peer.recv_header().unwrap();
        assert_eq!(hdr.get_code(), MasterReq::SET_OWNER);
        assert!(rfds.is_none());

        // Not negotiated yet, so both calls must error out.
        assert!(master.get_protocol_features().is_err());
        assert!(master
            .set_protocol_features(VhostUserProtocolFeatures::all())
            .is_err());

        let vfeatures = 0x15 | VhostUserVirtioFeatures::PROTOCOL_FEATURES.bits();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(vfeatures);
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_features().unwrap();
        assert_eq!(features, vfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_features(vfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, vfeatures);

        let pfeatures = VhostUserProtocolFeatures::all();
        let hdr = VhostUserMsgHeader::new(MasterReq::GET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        let features = master.get_protocol_features().unwrap();
        assert_eq!(features, pfeatures);
        let (_hdr, rfds) = peer.recv_header().unwrap();
        assert!(rfds.is_none());

        master.set_protocol_features(pfeatures).unwrap();
        let (_hdr, msg, rfds) = peer.recv_body::<VhostUserU64>().unwrap();
        assert!(rfds.is_none());
        let val = msg.value;
        assert_eq!(val, pfeatures.bits());

        // Reply code SET_PROTOCOL_FEATURES does not match a GET request.
        let hdr = VhostUserMsgHeader::new(MasterReq::SET_PROTOCOL_FEATURES, 0x4, 8);
        let msg = VhostUserU64::new(pfeatures.bits());
        peer.send_message(&hdr, &msg, None).unwrap();
        assert!(master.get_protocol_features().is_err());
    }

    // set_config() argument validation: out-of-range offsets/sizes and invalid
    // flag bits are rejected, before and after features are force-negotiated.
    #[test]
    fn test_master_set_config_negative() {
        let (mut master, _peer) = create_pair();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        // Fails: CONFIG protocol feature not negotiated yet.
        master
            .set_config(0x100, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();

        // Force-enable all virtio/protocol features on the shared node.
        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        master
            .set_config(0, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap();
        // Offset at the config-space boundary is out of range.
        master
            .set_config(
                VHOST_USER_CONFIG_SIZE,
                VhostUserConfigFlags::WRITABLE,
                &buf[0..4],
            )
            .unwrap_err();
        master
            .set_config(0x1000, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .unwrap_err();
        // Flag bits outside the defined set are rejected.
        master
            .set_config(
                0x100,
                unsafe { VhostUserConfigFlags::from_bits_unchecked(0xffff_ffff) },
                &buf[0..4],
            )
            .unwrap_err();
        // Oversized and empty payloads are both invalid.
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &buf)
            .unwrap_err();
        master
            .set_config(VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE, &[])
            .unwrap_err();
    }

    // Like create_pair(), but with all virtio/protocol features pre-negotiated
    // so config/mem-table requests are allowed.
    fn create_pair2() -> (TestMaster, TestEndpoint) {
        let (master, peer) = create_pair();
        {
            let mut node = master.node();
            node.virtio_features = 0xffff_ffff;
            node.acked_virtio_features = 0xffff_ffff;
            node.protocol_features = 0xffff_ffff;
            node.acked_protocol_features = 0xffff_ffff;
        }

        (master, peer)
    }

    // get_config() fails when the reply carries the wrong request code.
    #[test]
    fn test_master_get_config_negative0() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_code(MasterReq::GET_FEATURES);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
        hdr.set_code(MasterReq::GET_CONFIG);
    }

    // get_config() fails when the reply flag is cleared on the header.
    #[test]
    fn test_master_get_config_negative1() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        hdr.set_reply(false);
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    // Baseline: a well-formed GET_CONFIG reply succeeds.
    #[test]
    fn test_master_get_config_negative2() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());
    }

    // get_config() fails when the reply's offset differs from the request's.
    #[test]
    fn test_master_get_config_negative3() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    // get_config() fails on a slightly-off reply offset (0x101 vs 0x100).
    #[test]
    fn test_master_get_config_negative4() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = 0x101;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    // get_config() fails on a reply offset beyond the message size limit.
    #[test]
    fn test_master_get_config_negative5() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.offset = (MAX_MSG_SIZE + 1) as u32;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    // get_config() fails when the reply's size field disagrees with the request.
    #[test]
    fn test_master_get_config_negative6() {
        let (mut master, mut peer) = create_pair2();
        let buf = vec![0x0; MAX_MSG_SIZE + 1];

        let hdr = VhostUserMsgHeader::new(MasterReq::GET_CONFIG, 0x4, 16);
        let mut msg = VhostUserConfig::new(0x100, 4, VhostUserConfigFlags::empty());
        peer.send_message_with_payload(&hdr, &msg, &buf[0..4], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_ok());

        msg.size = 6;
        peer.send_message_with_payload(&hdr, &msg, &buf[0..6], None)
            .unwrap();
        assert!(master
            .get_config(0x100, 4, VhostUserConfigFlags::WRITABLE, &buf[0..4])
            .is_err());
    }

    // set_mem_table() rejects empty tables and tables with more regions than
    // descriptors can be attached.
    // NOTE(review): "maset" looks like a typo for "master" in this test name.
    #[test]
    fn test_maset_set_mem_table_failure() {
        let (master, _peer) = create_pair2();

        master.set_mem_table(&[]).unwrap_err();
        let tables = vec![VhostUserMemoryRegionInfo::default(); MAX_ATTACHED_FD_ENTRIES + 1];
        master.set_mem_table(&tables).unwrap_err();
    }
}
1101