• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
2 // SPDX-License-Identifier: Apache-2.0
3 
4 //! Define communication messages for the vhost-user protocol.
5 //!
6 //! For message definition, please refer to the [vhost-user spec](https://qemu.readthedocs.io/en/latest/interop/vhost-user.html).
7 
8 #![allow(dead_code)]
9 #![allow(non_camel_case_types)]
10 #![allow(clippy::upper_case_acronyms)]
11 
12 use std::fmt::Debug;
13 use std::fs::File;
14 use std::io;
15 use std::marker::PhantomData;
16 use std::ops::Deref;
17 
18 use uuid::Uuid;
19 
20 use vm_memory::{mmap::NewBitmap, ByteValued, Error as MmapError, FileOffset, MmapRegion};
21 
22 #[cfg(feature = "xen")]
23 use vm_memory::{GuestAddress, MmapRange, MmapXenFlags};
24 
25 use super::{enum_value, Error, Result};
26 use crate::VringConfigData;
27 
28 /*
29 TODO: Consider deprecating this. We don't actually have any preallocated buffers except in tests,
30 so we should be able to support u32::MAX normally.
31 Also this doesn't need to be public api, since Endpoint is private anyway, this doesn't seem
32 useful for consumers of this crate.
33 
34 There are GPU specific messages (GpuBackendReq::UPDATE and CURSOR_UPDATE) that are larger than 4K.
35 We can use MsgHeader::MAX_MSG_SIZE, if we want to support larger messages only for GPU headers.
36 */
37 /// The vhost-user specification uses a field of u32 to store message length.
38 /// On the other hand, preallocated buffers are needed to receive messages from the Unix domain
39 /// socket. To preallocating a 4GB buffer for each vhost-user message is really just an overhead.
40 /// Among all defined vhost-user messages, only the VhostUserConfig and VhostUserMemory has variable
41 /// message size. For the VhostUserConfig, a maximum size of 4K is enough because the user
42 /// configuration space for virtio devices is (4K - 0x100) bytes at most. For the VhostUserMemory,
43 /// 4K should be enough too because it can support 255 memory regions at most.
44 pub const MAX_MSG_SIZE: usize = 0x1000;
45 
46 /// The VhostUserMemory message has variable message size and variable number of attached file
47 /// descriptors. Each user memory region entry in the message payload occupies 32 bytes,
48 /// so setting maximum number of attached file descriptors based on the maximum message size.
49 /// But rust only implements Default and AsMut traits for arrays with 0 - 32 entries, so further
50 /// reduce the maximum number...
51 // pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32;
52 pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;
53 
54 /// Starting position (inclusion) of the device configuration space in virtio devices.
55 pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100;
56 
57 /// Ending position (exclusion) of the device configuration space in virtio devices.
58 pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000;
59 
60 /// Maximum number of vrings supported.
61 pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;
62 
63 pub(super) trait Req:
64     Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Send + Sync + Into<u32> + TryFrom<u32>
65 {
66 }
67 
68 pub(super) trait MsgHeader: ByteValued + Copy + Default + VhostUserMsgValidator {
69     type Request: Req;
70 
71     /// The maximum size of a msg that can be encapsulated by this MsgHeader
72     const MAX_MSG_SIZE: usize;
73 }
74 
75 enum_value! {
76     #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
77     /// Type of requests sending from frontends to backends.
78     pub enum FrontendReq: u32 {
79         /// Get from the underlying vhost implementation the features bit mask.
80         GET_FEATURES = 1,
81         /// Enable features in the underlying vhost implementation using a bit mask.
82         SET_FEATURES = 2,
83         /// Set the current Frontend as an owner of the session.
84         SET_OWNER = 3,
85         /// No longer used.
86         RESET_OWNER = 4,
87         /// Set the memory map regions on the backend so it can translate the vring addresses.
88         SET_MEM_TABLE = 5,
89         /// Set logging shared memory space.
90         SET_LOG_BASE = 6,
91         /// Set the logging file descriptor, which is passed as ancillary data.
92         SET_LOG_FD = 7,
93         /// Set the size of the queue.
94         SET_VRING_NUM = 8,
95         /// Set the addresses of the different aspects of the vring.
96         SET_VRING_ADDR = 9,
97         /// Set the base offset in the available vring.
98         SET_VRING_BASE = 10,
99         /// Get the available vring base offset.
100         GET_VRING_BASE = 11,
101         /// Set the event file descriptor for adding buffers to the vring.
102         SET_VRING_KICK = 12,
103         /// Set the event file descriptor to signal when buffers are used.
104         SET_VRING_CALL = 13,
105         /// Set the event file descriptor to signal when error occurs.
106         SET_VRING_ERR = 14,
107         /// Get the protocol feature bit mask from the underlying vhost implementation.
108         GET_PROTOCOL_FEATURES = 15,
109         /// Enable protocol features in the underlying vhost implementation.
110         SET_PROTOCOL_FEATURES = 16,
111         /// Query how many queues the backend supports.
112         GET_QUEUE_NUM = 17,
113         /// Signal backend to enable or disable corresponding vring.
114         SET_VRING_ENABLE = 18,
115         /// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated
116         /// for guest that does not support GUEST_ANNOUNCE.
117         SEND_RARP = 19,
118         /// Set host MTU value exposed to the guest.
119         NET_SET_MTU = 20,
120         /// Set the socket file descriptor for backend initiated requests.
121         SET_BACKEND_REQ_FD = 21,
122         /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
123         IOTLB_MSG = 22,
124         /// Set the endianness of a VQ for legacy devices.
125         SET_VRING_ENDIAN = 23,
126         /// Fetch the contents of the virtio device configuration space.
127         GET_CONFIG = 24,
128         /// Change the contents of the virtio device configuration space.
129         SET_CONFIG = 25,
130         /// Create a session for crypto operation.
131         CREATE_CRYPTO_SESSION = 26,
132         /// Close a session for crypto operation.
133         CLOSE_CRYPTO_SESSION = 27,
134         /// Advise backend that a migration with postcopy enabled is underway.
135         POSTCOPY_ADVISE = 28,
136         /// Advise backend that a transition to postcopy mode has happened.
137         POSTCOPY_LISTEN = 29,
138         /// Advise that postcopy migration has now completed.
139         POSTCOPY_END = 30,
140         /// Get a shared buffer from backend.
141         GET_INFLIGHT_FD = 31,
142         /// Send the shared inflight buffer back to backend.
143         SET_INFLIGHT_FD = 32,
144         /// Sets the GPU protocol socket file descriptor.
145         GPU_SET_SOCKET = 33,
146         /// Ask the vhost user backend to disable all rings and reset all internal
147         /// device state to the initial state.
148         RESET_DEVICE = 34,
149         /// Indicate that a buffer was added to the vring instead of signalling it
150         /// using the vring’s kick file descriptor.
151         VRING_KICK = 35,
152         /// Return a u64 payload containing the maximum number of memory slots.
153         GET_MAX_MEM_SLOTS = 36,
154         /// Update the memory tables by adding the region described.
155         ADD_MEM_REG = 37,
156         /// Update the memory tables by removing the region described.
157         REM_MEM_REG = 38,
158         /// Notify the backend with updated device status as defined in the VIRTIO
159         /// specification.
160         SET_STATUS = 39,
161         /// Query the backend for its device status as defined in the VIRTIO
162         /// specification.
163         GET_STATUS = 40,
164         /// Retrieve a shared object from the device.
165         GET_SHARED_OBJECT = 41,
166         /// Begin transfer of internal state to/from the backend for migration
167         /// purposes.
168         SET_DEVICE_STATE_FD = 42,
169         /// After transferring state, check the backend for any errors that may have
170         /// occurred during the transfer
171         CHECK_DEVICE_STATE = 43,
172     }
173 }
174 
175 impl Req for FrontendReq {}
176 
177 enum_value! {
178     /// Type of requests sending from backends to frontends.
179     #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
180     pub enum BackendReq: u32 {
181         /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
182         IOTLB_MSG = 1,
183         /// Notify that the virtio device's configuration space has changed.
184         CONFIG_CHANGE_MSG = 2,
185         /// Set host notifier for a specified queue.
186         VRING_HOST_NOTIFIER_MSG = 3,
187         /// Indicate that a buffer was used from the vring.
188         VRING_CALL = 4,
189         /// Indicate that an error occurred on the specific vring.
190         VRING_ERR = 5,
191         /// Add a virtio shared object.
192         SHARED_OBJECT_ADD = 6,
193         /// Remove a virtio shared object.
194         SHARED_OBJECT_REMOVE = 7,
195         /// Lookup for a virtio shared object.
196         SHARED_OBJECT_LOOKUP = 8,
197     }
198 }
199 
200 impl Req for BackendReq {}
201 
202 /// Vhost message Validator.
203 pub trait VhostUserMsgValidator: ByteValued {
204     /// Validate message syntax only.
205     /// It doesn't validate message semantics such as protocol version number and dependency
206     /// on feature flags etc.
is_valid(&self) -> bool207     fn is_valid(&self) -> bool {
208         true
209     }
210 }
211 
212 // Bit mask for common message flags.
213 bitflags! {
214     /// Common message flags for vhost-user requests and replies.
215     pub struct VhostUserHeaderFlag: u32 {
216         /// Bits[0..2] is message version number.
217         const VERSION = 0x3;
218         /// Mark message as reply.
219         const REPLY = 0x4;
220         /// Sender anticipates a reply message from the peer.
221         const NEED_REPLY = 0x8;
222         /// All valid bits.
223         const ALL_FLAGS = 0xc;
224         /// All reserved bits.
225         const RESERVED_BITS = !0xf;
226     }
227 }
228 
229 /// Common message header for vhost-user requests and replies.
230 /// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
231 /// machine native byte order.
232 #[repr(C, packed)]
233 #[derive(Copy)]
234 pub(super) struct VhostUserMsgHeader<R: Req> {
235     request: u32,
236     flags: u32,
237     size: u32,
238     _r: PhantomData<R>,
239 }
240 
241 impl<R: Req> MsgHeader for VhostUserMsgHeader<R> {
242     type Request = R;
243     const MAX_MSG_SIZE: usize = MAX_MSG_SIZE;
244 }
245 
246 impl<R: Req> Debug for VhostUserMsgHeader<R> {
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result247     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
248         f.debug_struct("VhostUserMsgHeader")
249             .field("request", &{ self.request })
250             .field("flags", &{ self.flags })
251             .field("size", &{ self.size })
252             .finish()
253     }
254 }
255 
256 impl<R: Req> Clone for VhostUserMsgHeader<R> {
clone(&self) -> VhostUserMsgHeader<R>257     fn clone(&self) -> VhostUserMsgHeader<R> {
258         *self
259     }
260 }
261 
262 impl<R: Req> PartialEq for VhostUserMsgHeader<R> {
eq(&self, other: &Self) -> bool263     fn eq(&self, other: &Self) -> bool {
264         self.request == other.request && self.flags == other.flags && self.size == other.size
265     }
266 }
267 
268 impl<R: Req> VhostUserMsgHeader<R> {
269     /// Create a new instance of `VhostUserMsgHeader`.
new(request: R, flags: u32, size: u32) -> Self270     pub fn new(request: R, flags: u32, size: u32) -> Self {
271         // Default to protocol version 1
272         let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1;
273         VhostUserMsgHeader {
274             request: request.into(),
275             flags: fl,
276             size,
277             _r: PhantomData,
278         }
279     }
280 
281     /// Get message type.
get_code(&self) -> Result<R>282     pub fn get_code(&self) -> Result<R> {
283         R::try_from(self.request).map_err(|_| Error::InvalidMessage)
284     }
285 
286     /// Set message type.
set_code(&mut self, request: R)287     pub fn set_code(&mut self, request: R) {
288         self.request = request.into();
289     }
290 
291     /// Get message version number.
get_version(&self) -> u32292     pub fn get_version(&self) -> u32 {
293         self.flags & 0x3
294     }
295 
296     /// Set message version number.
set_version(&mut self, ver: u32)297     pub fn set_version(&mut self, ver: u32) {
298         self.flags &= !0x3;
299         self.flags |= ver & 0x3;
300     }
301 
302     /// Check whether it's a reply message.
is_reply(&self) -> bool303     pub fn is_reply(&self) -> bool {
304         (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0
305     }
306 
307     /// Mark message as reply.
set_reply(&mut self, is_reply: bool)308     pub fn set_reply(&mut self, is_reply: bool) {
309         if is_reply {
310             self.flags |= VhostUserHeaderFlag::REPLY.bits();
311         } else {
312             self.flags &= !VhostUserHeaderFlag::REPLY.bits();
313         }
314     }
315 
316     /// Check whether reply for this message is requested.
is_need_reply(&self) -> bool317     pub fn is_need_reply(&self) -> bool {
318         (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0
319     }
320 
321     /// Mark that reply for this message is needed.
set_need_reply(&mut self, need_reply: bool)322     pub fn set_need_reply(&mut self, need_reply: bool) {
323         if need_reply {
324             self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits();
325         } else {
326             self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits();
327         }
328     }
329 
330     /// Check whether it's the reply message for the request `req`.
is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool331     pub fn is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool {
332         if let (Ok(code1), Ok(code2)) = (self.get_code(), req.get_code()) {
333             self.is_reply() && !req.is_reply() && code1 == code2
334         } else {
335             false
336         }
337     }
338 
339     /// Get message size.
get_size(&self) -> u32340     pub fn get_size(&self) -> u32 {
341         self.size
342     }
343 
344     /// Set message size.
set_size(&mut self, size: u32)345     pub fn set_size(&mut self, size: u32) {
346         self.size = size;
347     }
348 }
349 
350 impl<R: Req> Default for VhostUserMsgHeader<R> {
default() -> Self351     fn default() -> Self {
352         VhostUserMsgHeader {
353             request: 0,
354             flags: 0x1,
355             size: 0,
356             _r: PhantomData,
357         }
358     }
359 }
360 
361 // SAFETY: Safe because all fields of VhostUserMsgHeader are POD.
362 unsafe impl<R: Req> ByteValued for VhostUserMsgHeader<R> {}
363 
364 impl<T: Req> VhostUserMsgValidator for VhostUserMsgHeader<T> {
365     #[allow(clippy::if_same_then_else)]
is_valid(&self) -> bool366     fn is_valid(&self) -> bool {
367         if self.get_code().is_err() {
368             return false;
369         } else if self.size as usize > MAX_MSG_SIZE {
370             return false;
371         } else if self.get_version() != 0x1 {
372             return false;
373         } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 {
374             return false;
375         }
376         true
377     }
378 }
379 
380 // Bit mask for transport specific flags in VirtIO feature set defined by vhost-user.
381 bitflags! {
382     #[derive(Copy, Clone, Debug, Eq, PartialEq)]
383     /// Transport specific flags in VirtIO feature set defined by vhost-user.
384     pub struct VhostUserVirtioFeatures: u64 {
385         /// Log dirtied shared memory pages.
386         const LOG_ALL = 0x400_0000;
387         /// Feature flag for the protocol feature.
388         const PROTOCOL_FEATURES = 0x4000_0000;
389     }
390 }
391 
392 // Bit mask for vhost-user protocol feature flags.
393 bitflags! {
394     #[derive(Copy, Clone, Debug, Eq, PartialEq)]
395     /// Vhost-user protocol feature flags.
396     pub struct VhostUserProtocolFeatures: u64 {
397         /// Support multiple queues.
398         const MQ = 0x0000_0001;
399         /// Support logging through shared memory fd.
400         const LOG_SHMFD = 0x0000_0002;
401         /// Support broadcasting fake RARP packet.
402         const RARP = 0x0000_0004;
403         /// Support sending reply messages for requests with NEED_REPLY flag set.
404         const REPLY_ACK = 0x0000_0008;
405         /// Support setting MTU for virtio-net devices.
406         const MTU = 0x0000_0010;
407         /// Allow the backend to send requests to the frontend by an optional communication channel.
408         const BACKEND_REQ = 0x0000_0020;
409         /// Support setting backend endian by SET_VRING_ENDIAN.
410         const CROSS_ENDIAN = 0x0000_0040;
411         /// Support crypto operations.
412         const CRYPTO_SESSION = 0x0000_0080;
413         /// Support sending userfault_fd from backends to frontends.
414         const PAGEFAULT = 0x0000_0100;
415         /// Support Virtio device configuration.
416         const CONFIG = 0x0000_0200;
417         /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend.
418         const BACKEND_SEND_FD = 0x0000_0400;
419         /// Allow the backend to register a host notifier.
420         const HOST_NOTIFIER = 0x0000_0800;
421         /// Support inflight shmfd.
422         const INFLIGHT_SHMFD = 0x0000_1000;
423         /// Support resetting the device.
424         const RESET_DEVICE = 0x0000_2000;
425         /// Support inband notifications.
426         const INBAND_NOTIFICATIONS = 0x0000_4000;
427         /// Support configuring memory slots.
428         const CONFIGURE_MEM_SLOTS = 0x0000_8000;
429         /// Support reporting status.
430         const STATUS = 0x0001_0000;
431         /// Support Xen mmap.
432         const XEN_MMAP = 0x0002_0000;
433         /// Support shared objects.
434         const SHARED_OBJECT = 0x0004_0000;
435         /// Support transferring internal device state.
436         const DEVICE_STATE = 0x0008_0000;
437     }
438 }
439 
440 /// An empty message.
441 #[derive(Copy, Clone, Default)]
442 pub struct VhostUserEmpty;
443 
444 // SAFETY: Safe because type is zero size.
445 unsafe impl ByteValued for VhostUserEmpty {}
446 
447 impl VhostUserMsgValidator for VhostUserEmpty {}
448 
449 /// A generic message to encapsulate a 64-bit value.
450 #[repr(transparent)]
451 #[derive(Copy, Clone, Default)]
452 pub struct VhostUserU64 {
453     /// The encapsulated 64-bit common value.
454     pub value: u64,
455 }
456 
457 impl VhostUserU64 {
458     /// Create a new instance.
new(value: u64) -> Self459     pub fn new(value: u64) -> Self {
460         VhostUserU64 { value }
461     }
462 }
463 
464 // SAFETY: Safe because all fields of VhostUserU64 are POD.
465 unsafe impl ByteValued for VhostUserU64 {}
466 
467 impl VhostUserMsgValidator for VhostUserU64 {}
468 
469 /// Memory region descriptor for the SET_MEM_TABLE request.
470 #[repr(C, packed)]
471 #[derive(Copy, Clone, Default)]
472 pub struct VhostUserMemory {
473     /// Number of memory regions in the payload.
474     pub num_regions: u32,
475     /// Padding for alignment.
476     pub padding1: u32,
477 }
478 
479 impl VhostUserMemory {
480     /// Create a new instance.
new(cnt: u32) -> Self481     pub fn new(cnt: u32) -> Self {
482         VhostUserMemory {
483             num_regions: cnt,
484             padding1: 0,
485         }
486     }
487 }
488 
489 // SAFETY: Safe because all fields of VhostUserMemory are POD.
490 unsafe impl ByteValued for VhostUserMemory {}
491 
492 impl VhostUserMsgValidator for VhostUserMemory {
493     #[allow(clippy::if_same_then_else)]
is_valid(&self) -> bool494     fn is_valid(&self) -> bool {
495         if self.padding1 != 0 {
496             return false;
497         } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 {
498             return false;
499         }
500         true
501     }
502 }
503 
504 /// Memory region descriptors as payload for the SET_MEM_TABLE request.
505 #[repr(C, packed)]
506 #[derive(Default, Clone, Copy)]
507 pub struct VhostUserMemoryRegion {
508     /// Guest physical address of the memory region.
509     pub guest_phys_addr: u64,
510     /// Size of the memory region.
511     pub memory_size: u64,
512     /// Virtual address in the current process.
513     pub user_addr: u64,
514     /// Offset where region starts in the mapped memory.
515     pub mmap_offset: u64,
516 
517     #[cfg(feature = "xen")]
518     /// Xen specific flags.
519     pub xen_mmap_flags: u32,
520 
521     #[cfg(feature = "xen")]
522     /// Xen specific data.
523     pub xen_mmap_data: u32,
524 }
525 
526 impl VhostUserMemoryRegion {
is_valid_common(&self) -> bool527     fn is_valid_common(&self) -> bool {
528         self.memory_size != 0
529             && self.guest_phys_addr.checked_add(self.memory_size).is_some()
530             && self.user_addr.checked_add(self.memory_size).is_some()
531             && self.mmap_offset.checked_add(self.memory_size).is_some()
532     }
533 }
534 
535 #[cfg(not(feature = "xen"))]
536 impl VhostUserMemoryRegion {
537     /// Create a new instance.
new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self538     pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
539         VhostUserMemoryRegion {
540             guest_phys_addr,
541             memory_size,
542             user_addr,
543             mmap_offset,
544         }
545     }
546 
547     /// Creates mmap region from Self.
mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>>548     pub fn mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>> {
549         MmapRegion::<B>::from_file(
550             FileOffset::new(file, self.mmap_offset),
551             self.memory_size as usize,
552         )
553         .map_err(MmapError::MmapRegion)
554         .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)))
555     }
556 
is_valid(&self) -> bool557     fn is_valid(&self) -> bool {
558         self.is_valid_common()
559     }
560 }
561 
562 #[cfg(feature = "xen")]
563 impl VhostUserMemoryRegion {
564     /// Create a new instance.
with_xen( guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64, xen_mmap_flags: u32, xen_mmap_data: u32, ) -> Self565     pub fn with_xen(
566         guest_phys_addr: u64,
567         memory_size: u64,
568         user_addr: u64,
569         mmap_offset: u64,
570         xen_mmap_flags: u32,
571         xen_mmap_data: u32,
572     ) -> Self {
573         VhostUserMemoryRegion {
574             guest_phys_addr,
575             memory_size,
576             user_addr,
577             mmap_offset,
578             xen_mmap_flags,
579             xen_mmap_data,
580         }
581     }
582 
583     /// Creates mmap region from Self.
mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>>584     pub fn mmap_region<B: NewBitmap>(&self, file: File) -> Result<MmapRegion<B>> {
585         let range = MmapRange::new(
586             self.memory_size as usize,
587             Some(FileOffset::new(file, self.mmap_offset)),
588             GuestAddress(self.guest_phys_addr),
589             self.xen_mmap_flags,
590             self.xen_mmap_data,
591         );
592 
593         MmapRegion::<B>::from_range(range)
594             .map_err(MmapError::MmapRegion)
595             .map_err(|e| Error::ReqHandlerError(io::Error::new(io::ErrorKind::Other, e)))
596     }
597 
is_valid(&self) -> bool598     fn is_valid(&self) -> bool {
599         if !self.is_valid_common() {
600             false
601         } else {
602             // Only of one of FOREIGN or GRANT should be set.
603             match MmapXenFlags::from_bits(self.xen_mmap_flags) {
604                 Some(flags) => flags.is_valid(),
605                 None => false,
606             }
607         }
608     }
609 }
610 
611 // SAFETY: Safe because all fields of VhostUserMemoryRegion are POD.
612 unsafe impl ByteValued for VhostUserMemoryRegion {}
613 
614 impl VhostUserMsgValidator for VhostUserMemoryRegion {
is_valid(&self) -> bool615     fn is_valid(&self) -> bool {
616         self.is_valid()
617     }
618 }
619 
620 /// Payload of the VhostUserMemory message.
621 pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;
622 
623 /// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
624 /// requests.
625 #[repr(C)]
626 #[derive(Default, Clone, Copy)]
627 pub struct VhostUserSingleMemoryRegion {
628     /// Padding for correct alignment
629     padding: u64,
630     /// General memory region
631     region: VhostUserMemoryRegion,
632 }
633 
634 impl Deref for VhostUserSingleMemoryRegion {
635     type Target = VhostUserMemoryRegion;
636 
deref(&self) -> &VhostUserMemoryRegion637     fn deref(&self) -> &VhostUserMemoryRegion {
638         &self.region
639     }
640 }
641 
642 #[cfg(not(feature = "xen"))]
643 impl VhostUserSingleMemoryRegion {
644     /// Create a new instance.
new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self645     pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
646         VhostUserSingleMemoryRegion {
647             padding: 0,
648             region: VhostUserMemoryRegion::new(
649                 guest_phys_addr,
650                 memory_size,
651                 user_addr,
652                 mmap_offset,
653             ),
654         }
655     }
656 }
657 
658 #[cfg(feature = "xen")]
659 impl VhostUserSingleMemoryRegion {
660     /// Create a new instance.
new( guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64, xen_mmap_flags: u32, xen_mmap_data: u32, ) -> Self661     pub fn new(
662         guest_phys_addr: u64,
663         memory_size: u64,
664         user_addr: u64,
665         mmap_offset: u64,
666         xen_mmap_flags: u32,
667         xen_mmap_data: u32,
668     ) -> Self {
669         VhostUserSingleMemoryRegion {
670             padding: 0,
671             region: VhostUserMemoryRegion::with_xen(
672                 guest_phys_addr,
673                 memory_size,
674                 user_addr,
675                 mmap_offset,
676                 xen_mmap_flags,
677                 xen_mmap_data,
678             ),
679         }
680     }
681 }
682 
683 // SAFETY: Safe because all fields of VhostUserSingleMemoryRegion are POD.
684 unsafe impl ByteValued for VhostUserSingleMemoryRegion {}
685 impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {}
686 
687 /// Vring state descriptor.
688 #[repr(C, packed)]
689 #[derive(Copy, Clone, Default)]
690 pub struct VhostUserVringState {
691     /// Vring index.
692     pub index: u32,
693     /// A common 32bit value to encapsulate vring state etc.
694     pub num: u32,
695 }
696 
697 impl VhostUserVringState {
698     /// Create a new instance.
new(index: u32, num: u32) -> Self699     pub fn new(index: u32, num: u32) -> Self {
700         VhostUserVringState { index, num }
701     }
702 }
703 
704 // SAFETY: Safe because all fields of VhostUserVringState are POD.
705 unsafe impl ByteValued for VhostUserVringState {}
706 
707 impl VhostUserMsgValidator for VhostUserVringState {}
708 
709 // Bit mask for vring address flags.
710 bitflags! {
711     /// Flags for vring address.
712     pub struct VhostUserVringAddrFlags: u32 {
713         /// Support log of vring operations.
714         /// Modifications to "used" vring should be logged.
715         const VHOST_VRING_F_LOG = 0x1;
716     }
717 }
718 
719 /// Vring address descriptor.
720 #[repr(C, packed)]
721 #[derive(Copy, Clone, Default)]
722 pub struct VhostUserVringAddr {
723     /// Vring index.
724     pub index: u32,
725     /// Vring flags defined by VhostUserVringAddrFlags.
726     pub flags: u32,
727     /// Ring address of the vring descriptor table.
728     pub descriptor: u64,
729     /// Ring address of the vring used ring.
730     pub used: u64,
731     /// Ring address of the vring available ring.
732     pub available: u64,
733     /// Guest address for logging.
734     pub log: u64,
735 }
736 
737 impl VhostUserVringAddr {
738     /// Create a new instance.
new( index: u32, flags: VhostUserVringAddrFlags, descriptor: u64, used: u64, available: u64, log: u64, ) -> Self739     pub fn new(
740         index: u32,
741         flags: VhostUserVringAddrFlags,
742         descriptor: u64,
743         used: u64,
744         available: u64,
745         log: u64,
746     ) -> Self {
747         VhostUserVringAddr {
748             index,
749             flags: flags.bits(),
750             descriptor,
751             used,
752             available,
753             log,
754         }
755     }
756 
757     /// Create a new instance from `VringConfigData`.
758     #[allow(clippy::useless_conversion)]
from_config_data(index: u32, config_data: &VringConfigData) -> Self759     pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
760         let log_addr = config_data.log_addr.unwrap_or(0);
761         VhostUserVringAddr {
762             index,
763             flags: config_data.flags,
764             descriptor: config_data.desc_table_addr,
765             used: config_data.used_ring_addr,
766             available: config_data.avail_ring_addr,
767             log: log_addr,
768         }
769     }
770 }
771 
772 // SAFETY: Safe because all fields of VhostUserVringAddr are POD.
773 unsafe impl ByteValued for VhostUserVringAddr {}
774 
775 impl VhostUserMsgValidator for VhostUserVringAddr {
776     #[allow(clippy::if_same_then_else)]
is_valid(&self) -> bool777     fn is_valid(&self) -> bool {
778         if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 {
779             return false;
780         } else if self.descriptor & 0xf != 0 {
781             return false;
782         } else if self.available & 0x1 != 0 {
783             return false;
784         } else if self.used & 0x3 != 0 {
785             return false;
786         }
787         true
788     }
789 }
790 
791 // Bit mask for the vhost-user device configuration message.
792 bitflags! {
793     #[derive(Copy, Clone, Debug, Eq, PartialEq)]
794     /// Flags for the device configuration message.
795     pub struct VhostUserConfigFlags: u32 {
796         /// Vhost frontend messages used for writeable fields.
797         const WRITABLE = 0x1;
798         /// Vhost frontend messages used for live migration.
799         const LIVE_MIGRATION = 0x2;
800     }
801 }
802 
803 /// Message to read/write device configuration space.
804 #[repr(C, packed)]
805 #[derive(Copy, Clone, Default)]
806 pub struct VhostUserConfig {
807     /// Offset of virtio device's configuration space.
808     pub offset: u32,
809     /// Configuration space access size in bytes.
810     pub size: u32,
811     /// Flags for the device configuration operation.
812     pub flags: u32,
813 }
814 
815 impl VhostUserConfig {
816     /// Create a new instance.
new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self817     pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self {
818         VhostUserConfig {
819             offset,
820             size,
821             flags: flags.bits(),
822         }
823     }
824 }
825 
826 // SAFETY: Safe because all fields of VhostUserConfig are POD.
827 unsafe impl ByteValued for VhostUserConfig {}
828 
829 impl VhostUserMsgValidator for VhostUserConfig {
830     #[allow(clippy::if_same_then_else)]
is_valid(&self) -> bool831     fn is_valid(&self) -> bool {
832         let end_addr = match self.size.checked_add(self.offset) {
833             Some(addr) => addr,
834             None => return false,
835         };
836         if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 {
837             return false;
838         } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE {
839             return false;
840         }
841         true
842     }
843 }
844 
845 /// Payload for the VhostUserConfig message.
846 pub type VhostUserConfigPayload = Vec<u8>;
847 
848 /// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
849 /// requests.
850 #[repr(C)]
851 #[derive(Copy, Clone, Default)]
852 pub struct VhostUserInflight {
853     /// Size of the area to track inflight I/O.
854     pub mmap_size: u64,
855     /// Offset of this area from the start of the supplied file descriptor.
856     pub mmap_offset: u64,
857     /// Number of virtqueues.
858     pub num_queues: u16,
859     /// Size of virtqueues.
860     pub queue_size: u16,
861 }
862 
863 impl VhostUserInflight {
864     /// Create a new instance.
new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self865     pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self {
866         VhostUserInflight {
867             mmap_size,
868             mmap_offset,
869             num_queues,
870             queue_size,
871         }
872     }
873 }
874 
875 // SAFETY: Safe because all fields of VhostUserInflight are POD.
876 unsafe impl ByteValued for VhostUserInflight {}
877 
878 impl VhostUserMsgValidator for VhostUserInflight {
is_valid(&self) -> bool879     fn is_valid(&self) -> bool {
880         if self.num_queues == 0 || self.queue_size == 0 {
881             return false;
882         }
883         true
884     }
885 }
886 
887 /// Single memory region descriptor as payload for SET_LOG_BASE request.
888 #[repr(C)]
889 #[derive(Copy, Clone, Default)]
890 pub struct VhostUserLog {
891     /// Size of the area to log dirty pages.
892     pub mmap_size: u64,
893     /// Offset of this area from the start of the supplied file descriptor.
894     pub mmap_offset: u64,
895 }
896 
897 impl VhostUserLog {
898     /// Create a new instance.
new(mmap_size: u64, mmap_offset: u64) -> Self899     pub fn new(mmap_size: u64, mmap_offset: u64) -> Self {
900         VhostUserLog {
901             mmap_size,
902             mmap_offset,
903         }
904     }
905 }
906 
907 // SAFETY: Safe because all fields of VhostUserLog are POD.
908 unsafe impl ByteValued for VhostUserLog {}
909 
910 impl VhostUserMsgValidator for VhostUserLog {
is_valid(&self) -> bool911     fn is_valid(&self) -> bool {
912         if self.mmap_size == 0 || self.mmap_offset.checked_add(self.mmap_size).is_none() {
913             return false;
914         }
915         true
916     }
917 }
918 
919 enum_value! {
920     /// Direction of state transfer for migration
921     #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
922     pub enum VhostTransferStateDirection: u32 {
923         /// Outgoing migration: Transfer state from back-end to front-end
924         SAVE = 0,
925         /// Incoming migration: Transfer state from front-end to back-end
926         LOAD = 1,
927     }
928 }
929 
930 enum_value! {
931     /// Migration phases during which state transfer can occur
932     #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
933     pub enum VhostTransferStatePhase: u32 {
934         /// The device (and all its vrings) are stopped
935         STOPPED = 0,
936     }
937 }
938 
939 /// Contains UUID to interact with associated virtio shared object.
940 #[repr(C)]
941 #[derive(Clone, Copy, Default)]
942 pub struct VhostUserSharedMsg {
943     /// UUID of the shared object
944     pub uuid: Uuid,
945 }
946 
947 // SAFETY: Safe because VhostUserSharedMsg is a
948 // fixed-size array internally and there is no
949 // compiler-inserted padding.
950 unsafe impl ByteValued for VhostUserSharedMsg {}
951 
952 impl VhostUserMsgValidator for VhostUserSharedMsg {
is_valid(&self) -> bool953     fn is_valid(&self) -> bool {
954         !(self.uuid.is_nil() || self.uuid.is_max())
955     }
956 }
957 
958 /// Query/send virtio-fs migration state
959 // Note: this struct is not defined as `packed` in the SPEC and although
960 // it is not necessary, since the struct has no padding, it simplifies
961 // reviewing it because it is a requirement for implementing `ByteValued`.
962 #[repr(C, packed)]
963 #[derive(Clone, Copy, Default)]
964 pub struct VhostUserTransferDeviceState {
965     /// Direction of state transfer (save/load)
966     pub direction: u32,
967     /// Migration phase during which the transfer takes place
968     pub phase: u32,
969 }
970 
971 // SAFETY: Safe because VhostUserTransferDeviceState is a POD
972 // (i.e., none of its fields are references or raw pointers),
973 // and there is no compiler-inserted padding.
974 unsafe impl ByteValued for VhostUserTransferDeviceState {}
975 
976 impl VhostUserTransferDeviceState {
977     /// Create a new instance.
new(direction: VhostTransferStateDirection, phase: VhostTransferStatePhase) -> Self978     pub fn new(direction: VhostTransferStateDirection, phase: VhostTransferStatePhase) -> Self {
979         VhostUserTransferDeviceState {
980             direction: direction as u32,
981             phase: phase as u32,
982         }
983     }
984 }
985 
986 impl VhostUserMsgValidator for VhostUserTransferDeviceState {
is_valid(&self) -> bool987     fn is_valid(&self) -> bool {
988         VhostTransferStateDirection::try_from(self.direction).is_ok()
989             && VhostTransferStatePhase::try_from(self.phase).is_ok()
990     }
991 }
992 
993 /// Inflight I/O descriptor state for split virtqueues
994 #[repr(C, packed)]
995 #[derive(Clone, Copy, Default)]
996 pub struct DescStateSplit {
997     /// Indicate whether this descriptor (only head) is inflight or not.
998     pub inflight: u8,
999     /// Padding
1000     padding: [u8; 5],
1001     /// List of last batch of used descriptors, only when batching is used for submitting
1002     pub next: u16,
1003     /// Preserve order of fetching available descriptors, only for head descriptor
1004     pub counter: u64,
1005 }
1006 
1007 impl DescStateSplit {
1008     /// New instance of DescStateSplit struct
new() -> Self1009     pub fn new() -> Self {
1010         Self::default()
1011     }
1012 }
1013 
1014 /// Inflight I/O queue region for split virtqueues
1015 #[repr(C, packed)]
1016 pub struct QueueRegionSplit {
1017     /// Features flags of this region
1018     pub features: u64,
1019     /// Version of this region
1020     pub version: u16,
1021     /// Number of DescStateSplit entries
1022     pub desc_num: u16,
1023     /// List to track last batch of used descriptors
1024     pub last_batch_head: u16,
1025     /// Idx value of used ring
1026     pub used_idx: u16,
1027     /// Pointer to an array of DescStateSplit entries
1028     pub desc: u64,
1029 }
1030 
1031 impl QueueRegionSplit {
1032     /// New instance of QueueRegionSplit struct
new(features: u64, queue_size: u16) -> Self1033     pub fn new(features: u64, queue_size: u16) -> Self {
1034         QueueRegionSplit {
1035             features,
1036             version: 1,
1037             desc_num: queue_size,
1038             last_batch_head: 0,
1039             used_idx: 0,
1040             desc: 0,
1041         }
1042     }
1043 }
1044 
1045 /// Inflight I/O descriptor state for packed virtqueues
1046 #[repr(C, packed)]
1047 #[derive(Clone, Copy, Default)]
1048 pub struct DescStatePacked {
1049     /// Indicate whether this descriptor (only head) is inflight or not.
1050     pub inflight: u8,
1051     /// Padding
1052     padding: u8,
1053     /// Link to next free entry
1054     pub next: u16,
1055     /// Link to last entry of descriptor list, only for head
1056     pub last: u16,
1057     /// Length of descriptor list, only for head
1058     pub num: u16,
1059     /// Preserve order of fetching avail descriptors, only for head
1060     pub counter: u64,
1061     /// Buffer ID
1062     pub id: u16,
1063     /// Descriptor flags
1064     pub flags: u16,
1065     /// Buffer length
1066     pub len: u32,
1067     /// Buffer address
1068     pub addr: u64,
1069 }
1070 
1071 impl DescStatePacked {
1072     /// New instance of DescStatePacked struct
new() -> Self1073     pub fn new() -> Self {
1074         Self::default()
1075     }
1076 }
1077 
1078 /// Inflight I/O queue region for packed virtqueues
1079 #[repr(C, packed)]
1080 pub struct QueueRegionPacked {
1081     /// Features flags of this region
1082     pub features: u64,
1083     /// version of this region
1084     pub version: u16,
1085     /// size of descriptor state array
1086     pub desc_num: u16,
1087     /// head of free DescStatePacked entry list
1088     pub free_head: u16,
1089     /// old head of free DescStatePacked entry list
1090     pub old_free_head: u16,
1091     /// used idx of descriptor ring
1092     pub used_idx: u16,
1093     /// old used idx of descriptor ring
1094     pub old_used_idx: u16,
1095     /// device ring wrap counter
1096     pub used_wrap_counter: u8,
1097     /// old device ring wrap counter
1098     pub old_used_wrap_counter: u8,
1099     /// Padding
1100     padding: [u8; 7],
1101     /// Pointer to array tracking state of each descriptor from descriptor ring
1102     pub desc: u64,
1103 }
1104 
1105 impl QueueRegionPacked {
1106     /// New instance of QueueRegionPacked struct
new(features: u64, queue_size: u16) -> Self1107     pub fn new(features: u64, queue_size: u16) -> Self {
1108         QueueRegionPacked {
1109             features,
1110             version: 1,
1111             desc_num: queue_size,
1112             free_head: 0,
1113             old_free_head: 0,
1114             used_idx: 0,
1115             old_used_idx: 0,
1116             used_wrap_counter: 0,
1117             old_used_wrap_counter: 0,
1118             padding: [0; 7],
1119             desc: 0,
1120         }
1121     }
1122 }
1123 
1124 #[cfg(test)]
1125 mod tests {
1126     use super::*;
1127     use std::mem;
1128 
1129     #[cfg(feature = "xen")]
1130     impl VhostUserMemoryRegion {
new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self1131         fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
1132             Self::with_xen(
1133                 guest_phys_addr,
1134                 memory_size,
1135                 user_addr,
1136                 mmap_offset,
1137                 MmapXenFlags::FOREIGN.bits(),
1138                 0,
1139             )
1140         }
1141     }
1142 
1143     #[test]
check_transfer_state_direction_code()1144     fn check_transfer_state_direction_code() {
1145         let load_code: u32 = VhostTransferStateDirection::LOAD.into();
1146         assert!(VhostTransferStateDirection::try_from(load_code).is_ok());
1147         assert_eq!(load_code, load_code.clone());
1148 
1149         let save_code: u32 = VhostTransferStateDirection::SAVE.into();
1150         assert!(VhostTransferStateDirection::try_from(save_code).is_ok());
1151         assert_eq!(save_code, save_code.clone());
1152 
1153         assert!(VhostTransferStateDirection::try_from(3).is_err());
1154     }
1155 
1156     #[test]
check_transfer_state_phase_code()1157     fn check_transfer_state_phase_code() {
1158         let code: u32 = VhostTransferStatePhase::STOPPED.into();
1159         assert!(VhostTransferStatePhase::try_from(code).is_ok());
1160         assert_eq!(code, code.clone());
1161 
1162         assert!(VhostTransferStatePhase::try_from(1).is_err());
1163     }
1164 
1165     #[test]
check_frontend_request_code()1166     fn check_frontend_request_code() {
1167         let code: u32 = FrontendReq::GET_FEATURES.into();
1168         assert!(FrontendReq::try_from(code).is_ok());
1169         assert_eq!(code, code.clone());
1170         assert!(FrontendReq::try_from(10000).is_err());
1171     }
1172 
1173     #[test]
check_backend_request_code()1174     fn check_backend_request_code() {
1175         let code: u32 = BackendReq::CONFIG_CHANGE_MSG.into();
1176         assert!(BackendReq::try_from(code).is_ok());
1177         assert_eq!(code, code.clone());
1178         assert!(BackendReq::try_from(10000).is_err());
1179     }
1180 
1181     #[test]
msg_header_ops()1182     fn msg_header_ops() {
1183         let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100);
1184         assert_eq!(hdr.get_code().unwrap(), FrontendReq::GET_FEATURES);
1185         hdr.set_code(FrontendReq::SET_FEATURES);
1186         assert_eq!(hdr.get_code().unwrap(), FrontendReq::SET_FEATURES);
1187 
1188         assert_eq!(hdr.get_version(), 0x1);
1189 
1190         assert!(!hdr.is_reply());
1191         hdr.set_reply(true);
1192         assert!(hdr.is_reply());
1193         hdr.set_reply(false);
1194 
1195         assert!(!hdr.is_need_reply());
1196         hdr.set_need_reply(true);
1197         assert!(hdr.is_need_reply());
1198         hdr.set_need_reply(false);
1199 
1200         assert_eq!(hdr.get_size(), 0x100);
1201         hdr.set_size(0x200);
1202         assert_eq!(hdr.get_size(), 0x200);
1203 
1204         assert!(!hdr.is_need_reply());
1205         assert!(!hdr.is_reply());
1206         assert_eq!(hdr.get_version(), 0x1);
1207 
1208         // Check message length
1209         assert!(hdr.is_valid());
1210         hdr.set_size(0x2000);
1211         assert!(!hdr.is_valid());
1212         hdr.set_size(0x100);
1213         assert_eq!(hdr.get_size(), 0x100);
1214         assert!(hdr.is_valid());
1215         hdr.set_size((MAX_MSG_SIZE - mem::size_of::<VhostUserMsgHeader<FrontendReq>>()) as u32);
1216         assert!(hdr.is_valid());
1217         hdr.set_size(0x0);
1218         assert!(hdr.is_valid());
1219 
1220         // Check version
1221         hdr.set_version(0x0);
1222         assert!(!hdr.is_valid());
1223         hdr.set_version(0x2);
1224         assert!(!hdr.is_valid());
1225         hdr.set_version(0x1);
1226         assert!(hdr.is_valid());
1227 
1228         // Test Debug, Clone, PartiaEq trait
1229         assert_eq!(hdr, hdr.clone());
1230         assert_eq!(hdr.clone().get_code().unwrap(), hdr.get_code().unwrap());
1231         assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr));
1232     }
1233 
1234     #[test]
test_vhost_user_message_u64()1235     fn test_vhost_user_message_u64() {
1236         let val = VhostUserU64::default();
1237         let val1 = VhostUserU64::new(0);
1238 
1239         let a = val.value;
1240         let b = val1.value;
1241         assert_eq!(a, b);
1242         let a = VhostUserU64::new(1).value;
1243         assert_eq!(a, 1);
1244     }
1245 
1246     #[test]
check_user_memory()1247     fn check_user_memory() {
1248         let mut msg = VhostUserMemory::new(1);
1249         assert!(msg.is_valid());
1250         msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
1251         assert!(msg.is_valid());
1252 
1253         msg.num_regions += 1;
1254         assert!(!msg.is_valid());
1255         msg.num_regions = 0xFFFFFFFF;
1256         assert!(!msg.is_valid());
1257         msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
1258         msg.padding1 = 1;
1259         assert!(!msg.is_valid());
1260     }
1261 
1262     #[test]
check_user_memory_region()1263     fn check_user_memory_region() {
1264         let mut msg = VhostUserMemoryRegion::new(0, 0x1000, 0, 0);
1265         assert!(msg.is_valid());
1266         msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF;
1267         assert!(msg.is_valid());
1268         msg.guest_phys_addr = 0xFFFFFFFFFFFFF000;
1269         assert!(!msg.is_valid());
1270         msg.guest_phys_addr = 0xFFFFFFFFFFFF0000;
1271         msg.memory_size = 0;
1272         assert!(!msg.is_valid());
1273         let a = msg.guest_phys_addr;
1274         let b = msg.guest_phys_addr;
1275         assert_eq!(a, b);
1276 
1277         let msg = VhostUserMemoryRegion::default();
1278         let a = msg.guest_phys_addr;
1279         assert_eq!(a, 0);
1280         let a = msg.memory_size;
1281         assert_eq!(a, 0);
1282         let a = msg.user_addr;
1283         assert_eq!(a, 0);
1284         let a = msg.mmap_offset;
1285         assert_eq!(a, 0);
1286     }
1287 
1288     #[test]
test_vhost_user_state()1289     fn test_vhost_user_state() {
1290         let state = VhostUserVringState::new(5, 8);
1291 
1292         let a = state.index;
1293         assert_eq!(a, 5);
1294         let a = state.num;
1295         assert_eq!(a, 8);
1296         assert!(state.is_valid());
1297 
1298         let state = VhostUserVringState::default();
1299         let a = state.index;
1300         assert_eq!(a, 0);
1301         let a = state.num;
1302         assert_eq!(a, 0);
1303         assert!(state.is_valid());
1304     }
1305 
1306     #[test]
test_vhost_user_addr()1307     fn test_vhost_user_addr() {
1308         let mut addr = VhostUserVringAddr::new(
1309             2,
1310             VhostUserVringAddrFlags::VHOST_VRING_F_LOG,
1311             0x1000,
1312             0x2000,
1313             0x3000,
1314             0x4000,
1315         );
1316 
1317         let a = addr.index;
1318         assert_eq!(a, 2);
1319         let a = addr.flags;
1320         assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
1321         let a = addr.descriptor;
1322         assert_eq!(a, 0x1000);
1323         let a = addr.used;
1324         assert_eq!(a, 0x2000);
1325         let a = addr.available;
1326         assert_eq!(a, 0x3000);
1327         let a = addr.log;
1328         assert_eq!(a, 0x4000);
1329         assert!(addr.is_valid());
1330 
1331         addr.descriptor = 0x1001;
1332         assert!(!addr.is_valid());
1333         addr.descriptor = 0x1000;
1334 
1335         addr.available = 0x3001;
1336         assert!(!addr.is_valid());
1337         addr.available = 0x3000;
1338 
1339         addr.used = 0x2001;
1340         assert!(!addr.is_valid());
1341         addr.used = 0x2000;
1342         assert!(addr.is_valid());
1343     }
1344 
1345     #[test]
test_vhost_user_state_from_config()1346     fn test_vhost_user_state_from_config() {
1347         let config = VringConfigData {
1348             queue_max_size: 256,
1349             queue_size: 128,
1350             flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
1351             desc_table_addr: 0x1000,
1352             used_ring_addr: 0x2000,
1353             avail_ring_addr: 0x3000,
1354             log_addr: Some(0x4000),
1355         };
1356         let addr = VhostUserVringAddr::from_config_data(2, &config);
1357 
1358         let a = addr.index;
1359         assert_eq!(a, 2);
1360         let a = addr.flags;
1361         assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
1362         let a = addr.descriptor;
1363         assert_eq!(a, 0x1000);
1364         let a = addr.used;
1365         assert_eq!(a, 0x2000);
1366         let a = addr.available;
1367         assert_eq!(a, 0x3000);
1368         let a = addr.log;
1369         assert_eq!(a, 0x4000);
1370         assert!(addr.is_valid());
1371     }
1372 
1373     #[test]
check_user_vring_addr()1374     fn check_user_vring_addr() {
1375         let mut msg =
1376             VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0);
1377         assert!(msg.is_valid());
1378 
1379         msg.descriptor = 1;
1380         assert!(!msg.is_valid());
1381         msg.descriptor = 0;
1382 
1383         msg.available = 1;
1384         assert!(!msg.is_valid());
1385         msg.available = 0;
1386 
1387         msg.used = 1;
1388         assert!(!msg.is_valid());
1389         msg.used = 0;
1390 
1391         msg.flags |= 0x80000000;
1392         assert!(!msg.is_valid());
1393         msg.flags &= !0x80000000;
1394     }
1395 
1396     #[test]
check_user_config_msg()1397     fn check_user_config_msg() {
1398         let mut msg =
1399             VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE);
1400 
1401         assert!(msg.is_valid());
1402         msg.size = 0;
1403         assert!(!msg.is_valid());
1404         msg.size = 1;
1405         assert!(msg.is_valid());
1406         msg.offset = u32::MAX;
1407         assert!(!msg.is_valid());
1408         msg.offset = VHOST_USER_CONFIG_SIZE;
1409         assert!(!msg.is_valid());
1410         msg.offset = VHOST_USER_CONFIG_SIZE - 1;
1411         assert!(msg.is_valid());
1412         msg.size = 2;
1413         assert!(!msg.is_valid());
1414         msg.size = 1;
1415         msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits();
1416         assert!(msg.is_valid());
1417         msg.flags |= 0x4;
1418         assert!(!msg.is_valid());
1419     }
1420 }
1421