// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Define communication messages for the vhost-user protocol.
//!
//! For message definition, please refer to the [vhost-user spec](https://github.com/qemu/qemu/blob/f7526eece29cd2e36a63b6703508b24453095eb8/docs/interop/vhost-user.txt).

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]

use std::fmt::Debug;
use std::marker::PhantomData;

use base::Protection;
use bitflags::bitflags;
use zerocopy::FromBytes;
use zerocopy::Immutable;
use zerocopy::IntoBytes;
use zerocopy::KnownLayout;

use crate::VringConfigData;

/// The VhostUserMemory message has variable message size and variable number of attached file
/// descriptors. Each user memory region entry in the message payload occupies 32 bytes,
/// so setting maximum number of attached file descriptors based on the maximum message size.
/// But rust only implements Default and AsMut traits for arrays with 0 - 32 entries, so further
/// reduce the maximum number...
// NOTE(review): the 0-32 array-trait limitation predates const generics — confirm whether the
// original derivation below could be restored on current toolchains.
// pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32;
pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;

/// Starting position (inclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100;

/// Ending position (exclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000;

/// Maximum number of vrings supported.
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;

/// Message type. Either [[FrontendReq]] or [[BackendReq]].
///
/// Implementors must round-trip losslessly through `u32` (`Into<u32>` / `TryFrom<u32>`) since
/// the message code travels on the wire as a 32-bit integer.
pub trait Req:
    Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32> + TryFrom<u32> + Send + Sync
{
}

/// Error when converting an integer to an enum value.
#[derive(Copy, Clone, Debug, PartialEq, Eq, thiserror::Error)]
pub enum ReqError {
    /// The value does not correspond to a valid message code.
    #[error("The value {0} does not correspond to a valid message code.")]
    InvalidValue(u32),
}

/// Type of requests sent to the backend.
///
/// These are called "front-end message types" in the spec, so we call them `FrontendReq` here even
/// though it is somewhat confusing that the `BackendClient` sends `FrontendReq`s to a
/// `BackendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
pub enum FrontendReq {
    /// Get from the underlying vhost implementation the features bit mask.
    GET_FEATURES = 1,
    /// Enable features in the underlying vhost implementation using a bit mask.
    SET_FEATURES = 2,
    /// Set the current frontend as an owner of the session.
    SET_OWNER = 3,
    /// No longer used.
    RESET_OWNER = 4,
    /// Set the memory map regions on the backend so it can translate the vring addresses.
    SET_MEM_TABLE = 5,
    /// Set logging shared memory space.
    SET_LOG_BASE = 6,
    /// Set the logging file descriptor, which is passed as ancillary data.
    SET_LOG_FD = 7,
    /// Set the size of the queue.
    SET_VRING_NUM = 8,
    /// Set the addresses of the different aspects of the vring.
    SET_VRING_ADDR = 9,
    /// Set the base offset in the available vring.
    SET_VRING_BASE = 10,
    /// Get the available vring base offset.
    GET_VRING_BASE = 11,
    /// Set the event file descriptor for adding buffers to the vring.
    SET_VRING_KICK = 12,
    /// Set the event file descriptor to signal when buffers are used.
    SET_VRING_CALL = 13,
    /// Set the event file descriptor to signal when error occurs.
    SET_VRING_ERR = 14,
    /// Get the protocol feature bit mask from the underlying vhost implementation.
    GET_PROTOCOL_FEATURES = 15,
    /// Enable protocol features in the underlying vhost implementation.
    SET_PROTOCOL_FEATURES = 16,
    /// Query how many queues the backend supports.
    GET_QUEUE_NUM = 17,
    /// Signal backend to enable or disable corresponding vring.
    SET_VRING_ENABLE = 18,
    /// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated
    /// for guest that does not support GUEST_ANNOUNCE.
    SEND_RARP = 19,
    /// Set host MTU value exposed to the guest.
    NET_SET_MTU = 20,
    /// Set the socket file descriptor for backend initiated requests.
    SET_BACKEND_REQ_FD = 21,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 22,
    /// Set the endianness of a VQ for legacy devices.
    SET_VRING_ENDIAN = 23,
    /// Fetch the contents of the virtio device configuration space.
    GET_CONFIG = 24,
    /// Change the contents of the virtio device configuration space.
    SET_CONFIG = 25,
    /// Create a session for crypto operation.
    CREATE_CRYPTO_SESSION = 26,
    /// Close a session for crypto operation.
    CLOSE_CRYPTO_SESSION = 27,
    /// Advise backend that a migration with postcopy enabled is underway.
    POSTCOPY_ADVISE = 28,
    /// Advise backend that a transition to postcopy mode has happened.
    POSTCOPY_LISTEN = 29,
    /// Advise that postcopy migration has now completed.
    POSTCOPY_END = 30,
    /// Get a shared buffer from backend.
    GET_INFLIGHT_FD = 31,
    /// Send the shared inflight buffer back to backend.
    SET_INFLIGHT_FD = 32,
    /// Sets the GPU protocol socket file descriptor.
    GPU_SET_SOCKET = 33,
    /// Ask the vhost user backend to disable all rings and reset all internal
    /// device state to the initial state.
    RESET_DEVICE = 34,
    /// Indicate that a buffer was added to the vring instead of signalling it
    /// using the vring’s kick file descriptor.
    VRING_KICK = 35,
    /// Return a u64 payload containing the maximum number of memory slots.
    GET_MAX_MEM_SLOTS = 36,
    /// Update the memory tables by adding the region described.
    ADD_MEM_REG = 37,
    /// Update the memory tables by removing the region described.
    REM_MEM_REG = 38,
    /// Notify the backend with updated device status as defined in the VIRTIO
    /// specification.
    SET_STATUS = 39,
    /// Query the backend for its device status as defined in the VIRTIO
    /// specification.
    GET_STATUS = 40,
    // NOTE(review): message code 41 is skipped here — confirm against the vhost-user spec
    // whether it is intentionally unsupported.
    /// Front-end and back-end negotiate a channel over which to transfer the back-end’s internal
    /// state during migration.
    SET_DEVICE_STATE_FD = 42,
    /// After transferring the back-end’s internal state during migration, check whether the
    /// back-end was able to successfully fully process the state.
    CHECK_DEVICE_STATE = 43,

    // Non-standard message types.
    /// Get a list of the device's shared memory regions.
    GET_SHARED_MEMORY_REGIONS = 1004,
}

impl From<FrontendReq> for u32 {
    fn from(req: FrontendReq) -> u32 {
        req as u32
    }
}

impl Req for FrontendReq {}

impl TryFrom<u32> for FrontendReq {
    type Error = ReqError;

    // `enumn::N` generates `FrontendReq::n`, which returns None for values that are not
    // valid discriminants.
    fn try_from(value: u32) -> Result<Self, Self::Error> {
        FrontendReq::n(value).ok_or(ReqError::InvalidValue(value))
    }
}

/// Type of requests sending from backends to frontends.
///
/// These are called "back-end message types" in the spec, so we call them `BackendReq` here
/// even though it is somewhat confusing that the `FrontendClient` sends `BackendReq`s to a
/// `FrontendServer`.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, enumn::N)]
pub enum BackendReq {
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 1,
    /// Notify that the virtio device's configuration space has changed.
    CONFIG_CHANGE_MSG = 2,
    /// Set host notifier for a specified queue.
    VRING_HOST_NOTIFIER_MSG = 3,
    /// Indicate that a buffer was used from the vring.
    VRING_CALL = 4,
    /// Indicate that an error occurred on the specific vring.
    VRING_ERR = 5,

    // Non-standard message types.
    /// Indicates a request to map a fd into a shared memory region.
    SHMEM_MAP = 1000,
    /// Indicates a request to unmap part of a shared memory region.
    SHMEM_UNMAP = 1001,
    /// Virtio-fs draft: map file content into the window.
    DEPRECATED__FS_MAP = 1002,
    /// Virtio-fs draft: unmap file content from the window.
    DEPRECATED__FS_UNMAP = 1003,
    /// Virtio-fs draft: sync file content.
    DEPRECATED__FS_SYNC = 1004,
    /// Virtio-fs draft: perform a read/write from an fd directly to GPA.
    DEPRECATED__FS_IO = 1005,
    /// Indicates a request to map GPU memory into a shared memory region.
    GPU_MAP = 1006,
    /// Indicates a request to map external memory into a shared memory region.
    EXTERNAL_MAP = 1007,
}

impl From<BackendReq> for u32 {
    fn from(req: BackendReq) -> u32 {
        req as u32
    }
}

impl Req for BackendReq {}

impl TryFrom<u32> for BackendReq {
    type Error = ReqError;

    fn try_from(value: u32) -> Result<Self, Self::Error> {
        BackendReq::n(value).ok_or(ReqError::InvalidValue(value))
    }
}

/// Vhost message Validator.
pub trait VhostUserMsgValidator {
    /// Validate message syntax only.
    /// It doesn't validate message semantics such as protocol version number and dependency
    /// on feature flags etc.
    fn is_valid(&self) -> bool {
        true
    }
}

// Bit mask for common message flags.
bitflags! {
    /// Common message flags for vhost-user requests and replies.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserHeaderFlag: u32 {
        /// Bits[0..2] is message version number.
        const VERSION = 0x3;
        /// Mark message as reply.
        const REPLY = 0x4;
        /// Sender anticipates a reply message from the peer.
        const NEED_REPLY = 0x8;
        /// All valid bits (REPLY | NEED_REPLY; the version bits are handled separately).
        const ALL_FLAGS = 0xc;
        /// All reserved bits.
        const RESERVED_BITS = !0xf;
    }
}

/// Common message header for vhost-user requests and replies.
/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
/// machine native byte order.
#[repr(C, packed)]
#[derive(Copy)]
pub struct VhostUserMsgHeader<R: Req> {
    // Message code; decoded to `R` on demand via `get_code`.
    request: u32,
    // Version bits plus the `VhostUserHeaderFlag` bits.
    flags: u32,
    // Size in bytes of the payload that follows the header.
    size: u32,
    _r: PhantomData<R>,
}

impl<R: Req> Debug for VhostUserMsgHeader<R> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // The `&{ ... }` blocks copy each field to a temporary; taking a reference directly
        // into a `packed` struct would be an unaligned reference.
        f.debug_struct("VhostUserMsgHeader")
            .field("request", &{ self.request })
            .field("flags", &{ self.flags })
            .field("size", &{ self.size })
            .finish()
    }
}

impl<R: Req> Clone for VhostUserMsgHeader<R> {
    fn clone(&self) -> VhostUserMsgHeader<R> {
        *self
    }
}

impl<R: Req> PartialEq for VhostUserMsgHeader<R> {
    fn eq(&self, other: &Self) -> bool {
        self.request == other.request && self.flags == other.flags && self.size == other.size
    }
}

impl<R: Req> VhostUserMsgHeader<R> {
    /// Create a new instance of `VhostUserMsgHeader`.
    pub fn new(request: R, flags: u32, size: u32) -> Self {
        // Default to protocol version 1; caller-supplied flags are masked to the valid bits.
        let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1;
        VhostUserMsgHeader {
            request: request.into(),
            flags: fl,
            size,
            _r: PhantomData,
        }
    }

    /// Decompose the header into its raw `[request, flags, size]` fields.
    pub fn into_raw(self) -> [u32; 3] {
        [self.request, self.flags, self.size]
    }

    /// Reassemble a header from raw `[request, flags, size]` fields; no validation is done
    /// here (see `is_valid`).
    pub fn from_raw(raw: [u32; 3]) -> Self {
        Self {
            request: raw[0],
            flags: raw[1],
            size: raw[2],
            _r: PhantomData,
        }
    }

    /// Get message type.
    pub fn get_code(&self) -> std::result::Result<R, R::Error> {
        R::try_from(self.request)
    }

    /// Set message type.
    pub fn set_code(&mut self, request: R) {
        self.request = request.into();
    }

    /// Get message version number.
    pub fn get_version(&self) -> u32 {
        self.flags & 0x3
    }

    /// Set message version number.
    pub fn set_version(&mut self, ver: u32) {
        self.flags &= !0x3;
        self.flags |= ver & 0x3;
    }

    /// Check whether it's a reply message.
    pub fn is_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0
    }

    /// Mark message as reply.
    pub fn set_reply(&mut self, is_reply: bool) {
        if is_reply {
            self.flags |= VhostUserHeaderFlag::REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::REPLY.bits();
        }
    }

    /// Check whether reply for this message is requested.
    pub fn is_need_reply(&self) -> bool {
        (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0
    }

    /// Mark that reply for this message is needed.
    pub fn set_need_reply(&mut self, need_reply: bool) {
        if need_reply {
            self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits();
        } else {
            self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits();
        }
    }

    /// Check whether it's the reply message for the request `req`.
    pub fn is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool {
        self.is_reply() && !req.is_reply() && self.request == req.request
    }

    /// Get message size.
    pub fn get_size(&self) -> u32 {
        self.size
    }

    /// Set message size.
    pub fn set_size(&mut self, size: u32) {
        self.size = size;
    }
}

impl<R: Req> Default for VhostUserMsgHeader<R> {
    fn default() -> Self {
        // flags: version 1, no REPLY/NEED_REPLY bits set.
        VhostUserMsgHeader {
            request: 0,
            flags: 0x1,
            size: 0,
            _r: PhantomData,
        }
    }
}

impl<T: Req> VhostUserMsgValidator for VhostUserMsgHeader<T> {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // Valid iff: the code maps to a known message type, the version is exactly 1, and no
        // reserved flag bits are set.
        if self.get_code().is_err() {
            return false;
        } else if self.get_version() != 0x1 {
            return false;
        } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 {
            return false;
        }
        true
    }
}

/// Virtio feature flag for packed virtqueue layout.
pub const VIRTIO_F_RING_PACKED: u32 = 34;

/// Virtio feature flag for the vhost-user protocol features.
pub const VHOST_USER_F_PROTOCOL_FEATURES: u32 = 30;

// Bit mask for vhost-user protocol feature flags.
bitflags! {
    /// Vhost-user protocol feature flags.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserProtocolFeatures: u64 {
        /// Support multiple queues.
        const MQ = 0x0000_0001;
        /// Support logging through shared memory fd.
        const LOG_SHMFD = 0x0000_0002;
        /// Support broadcasting fake RARP packet.
        const RARP = 0x0000_0004;
        /// Support sending reply messages for requests with NEED_REPLY flag set.
        const REPLY_ACK = 0x0000_0008;
        /// Support setting MTU for virtio-net devices.
        const MTU = 0x0000_0010;
        /// Allow the backend to send requests to the frontend by an optional communication channel.
        const BACKEND_REQ = 0x0000_0020;
        /// Support setting backend endian by SET_VRING_ENDIAN.
        const CROSS_ENDIAN = 0x0000_0040;
        /// Support crypto operations.
        const CRYPTO_SESSION = 0x0000_0080;
        /// Support sending userfault_fd from backends to frontends.
        const PAGEFAULT = 0x0000_0100;
        /// Support Virtio device configuration.
        const CONFIG = 0x0000_0200;
        /// Allow the backend to send fds (at most 8 descriptors in each message) to the frontend.
        const BACKEND_SEND_FD = 0x0000_0400;
        /// Allow the backend to register a host notifier.
        const HOST_NOTIFIER = 0x0000_0800;
        /// Support inflight shmfd.
        const INFLIGHT_SHMFD = 0x0000_1000;
        /// Support resetting the device.
        const RESET_DEVICE = 0x0000_2000;
        /// Support inband notifications.
        const INBAND_NOTIFICATIONS = 0x0000_4000;
        /// Support configuring memory slots.
        const CONFIGURE_MEM_SLOTS = 0x0000_8000;
        /// Support reporting status.
        const STATUS = 0x0001_0000;
        /// Support Xen mmap.
        const XEN_MMAP = 0x0002_0000;
        // NOTE(review): bit 0x0004_0000 is deliberately absent here — verify against the spec
        // whether the corresponding feature should be represented.
        /// Support VHOST_USER_SET_DEVICE_STATE_FD and VHOST_USER_CHECK_DEVICE_STATE messages.
        const DEVICE_STATE = 0x0008_0000;
        /// Support shared memory regions. (Non-standard.)
        const SHARED_MEMORY_REGIONS = 0x8000_0000;
    }
}

/// A generic message to encapsulate a 64-bit value.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserU64 {
    /// The encapsulated 64-bit common value.
    pub value: u64,
}

impl VhostUserU64 {
    /// Create a new instance.
    pub fn new(value: u64) -> Self {
        VhostUserU64 { value }
    }
}

impl VhostUserMsgValidator for VhostUserU64 {}

/// An empty message.
#[repr(C)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserEmptyMsg;

impl VhostUserMsgValidator for VhostUserEmptyMsg {}

/// A generic message for empty message.
/// ZST in repr(C) has same type layout as repr(rust)
#[repr(C)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserEmptyMessage;

impl VhostUserMsgValidator for VhostUserEmptyMessage {}

/// Memory region descriptor for the SET_MEM_TABLE request.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserMemory {
    /// Number of memory regions in the payload.
    pub num_regions: u32,
    /// Padding for alignment.
    pub padding1: u32,
}

impl VhostUserMemory {
    /// Create a new instance.
    pub fn new(cnt: u32) -> Self {
        VhostUserMemory {
            num_regions: cnt,
            padding1: 0,
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemory {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // Padding must be zero and the region count must be within the fd-entry limit.
        if self.padding1 != 0 {
            return false;
        } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 {
            return false;
        }
        true
    }
}

/// Memory region descriptors as payload for the SET_MEM_TABLE request.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserMemoryRegion {
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where region starts in the mapped memory.
    pub mmap_offset: u64,
}

impl VhostUserMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserMemoryRegion {
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemoryRegion {
    fn is_valid(&self) -> bool {
        // Reject empty regions and any base+size combination that would overflow u64.
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}

/// Payload of the VhostUserMemory message.
pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;

/// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
/// requests.
#[repr(C)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserSingleMemoryRegion {
    /// Padding for correct alignment
    padding: u64,
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where region starts in the mapped memory.
    pub mmap_offset: u64,
}

impl VhostUserSingleMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserSingleMemoryRegion {
            padding: 0,
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {
    fn is_valid(&self) -> bool {
        // Same checks as VhostUserMemoryRegion: non-empty and no u64 overflow on any
        // base+size combination.
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}

/// Vring state descriptor.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserVringState {
    /// Vring index.
    pub index: u32,
    /// A common 32bit value to encapsulate vring state etc.
    pub num: u32,
}

impl VhostUserVringState {
    /// Create a new instance.
    pub fn new(index: u32, num: u32) -> Self {
        VhostUserVringState { index, num }
    }
}

impl VhostUserMsgValidator for VhostUserVringState {}

// Bit mask for vring address flags.
bitflags! {
    /// Flags for vring address.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserVringAddrFlags: u32 {
        /// Support log of vring operations.
        /// Modifications to "used" vring should be logged.
        const VHOST_VRING_F_LOG = 0x1;
    }
}

/// Vring address descriptor.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserVringAddr {
    /// Vring index.
    pub index: u32,
    /// Vring flags defined by VhostUserVringAddrFlags.
    pub flags: u32,
    /// Ring address of the vring descriptor table.
    pub descriptor: u64,
    /// Ring address of the vring used ring.
    pub used: u64,
    /// Ring address of the vring available ring.
    pub available: u64,
    /// Guest address for logging.
    pub log: u64,
}

impl VhostUserVringAddr {
    /// Create a new instance.
    pub fn new(
        index: u32,
        flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        log: u64,
    ) -> Self {
        VhostUserVringAddr {
            index,
            flags: flags.bits(),
            descriptor,
            used,
            available,
            log,
        }
    }

    /// Create a new instance from `VringConfigData`.
    pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
        // An absent log address is encoded as 0.
        let log_addr = config_data.log_addr.unwrap_or(0);
        VhostUserVringAddr {
            index,
            flags: config_data.flags,
            descriptor: config_data.desc_table_addr,
            used: config_data.used_ring_addr,
            available: config_data.avail_ring_addr,
            log: log_addr,
        }
    }
}

impl VhostUserMsgValidator for VhostUserVringAddr {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // Flags must contain only known bits, and each ring address must satisfy its
        // alignment: descriptor table 16 bytes, available ring 2 bytes, used ring 4 bytes.
        if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 {
            return false;
        } else if self.descriptor & 0xf != 0 {
            return false;
        } else if self.available & 0x1 != 0 {
            return false;
        } else if self.used & 0x3 != 0 {
            return false;
        }
        true
    }
}

// Bit mask for the vhost-user device configuration message.
bitflags! {
    /// Flags for the device configuration message.
    #[derive(Copy, Clone, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)]
    #[repr(transparent)]
    pub struct VhostUserConfigFlags: u32 {
        /// Vhost frontend messages used for writable fields.
        const WRITABLE = 0x1;
        /// Vhost frontend messages used for live migration.
        const LIVE_MIGRATION = 0x2;
    }
}

/// Message to read/write device configuration space.
#[repr(C, packed)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserConfig {
    /// Offset of virtio device's configuration space.
    pub offset: u32,
    /// Configuration space access size in bytes.
    pub size: u32,
    /// Flags for the device configuration operation.
    pub flags: u32,
}

impl VhostUserConfig {
    /// Create a new instance.
    pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self {
        VhostUserConfig {
            offset,
            size,
            flags: flags.bits(),
        }
    }
}

impl VhostUserMsgValidator for VhostUserConfig {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // offset + size must not overflow u32 and must stay within the config space bound.
        let end_addr = match self.size.checked_add(self.offset) {
            Some(addr) => addr,
            None => return false,
        };
        if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 {
            return false;
        } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE {
            return false;
        }
        true
    }
}

/// Payload for the VhostUserConfig message.
pub type VhostUserConfigPayload = Vec<u8>;

/// Inflight-I/O tracking area descriptor, used as payload for the GET_INFLIGHT_FD and
/// SET_INFLIGHT_FD requests.
/// This struct is defined by qemu and compiles with arch-dependent padding.
/// Interestingly, all our supported archs (arm, aarch64, x86_64) have the same
/// data layout for this type.
#[repr(C)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserInflight {
    /// Size of the area to track inflight I/O.
    pub mmap_size: u64,
    /// Offset of this area from the start of the supplied file descriptor.
    pub mmap_offset: u64,
    /// Number of virtqueues.
    pub num_queues: u16,
    /// Size of virtqueues.
    pub queue_size: u16,
    /// implicit padding on 64-bit platforms
    pub _padding: [u8; 4],
}

impl VhostUserInflight {
    /// Create a new instance.
    pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self {
        VhostUserInflight {
            mmap_size,
            mmap_offset,
            num_queues,
            queue_size,
            ..Default::default()
        }
    }
}

impl VhostUserMsgValidator for VhostUserInflight {
    fn is_valid(&self) -> bool {
        // Queue count and queue size must both be non-zero.
        if self.num_queues == 0 || self.queue_size == 0 {
            return false;
        }
        true
    }
}

/// VHOST_USER_SET_DEVICE_STATE_FD request payload.
#[repr(C)]
#[derive(Default, Clone, Copy, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct DeviceStateTransferParameters {
    /// Direction in which the state is transferred
    pub transfer_direction: u32,
    /// State in which the VM guest and devices are.
    pub migration_phase: u32,
}

impl VhostUserMsgValidator for DeviceStateTransferParameters {
    fn is_valid(&self) -> bool {
        // Validated elsewhere.
        true
    }
}

/*
 * TODO: support dirty log, live migration and IOTLB operations.
#[repr(C, packed)]
pub struct VhostUserVringArea {
    pub index: u32,
    pub flags: u32,
    pub size: u64,
    pub offset: u64,
}

#[repr(C, packed)]
pub struct VhostUserLog {
    pub size: u64,
    pub offset: u64,
}

#[repr(C, packed)]
pub struct VhostUserIotlb {
    pub iova: u64,
    pub size: u64,
    pub user_addr: u64,
    pub permission: u8,
    pub optype: u8,
}
*/

/// Flags for SHMEM_MAP messages.
#[repr(transparent)]
#[derive(
    FromBytes,
    Immutable,
    IntoBytes,
    KnownLayout,
    Copy,
    Clone,
    Debug,
    Default,
    Eq,
    Hash,
    Ord,
    PartialEq,
    PartialOrd,
)]
pub struct VhostUserShmemMapMsgFlags(u8);

// The bitflags impl is split from the struct definition so the zerocopy derives above can
// apply to the plain newtype.
bitflags! {
    impl VhostUserShmemMapMsgFlags: u8 {
        /// Empty permission.
        const EMPTY = 0x0;
        /// Read permission.
        const MAP_R = 0x1;
        /// Write permission.
        const MAP_W = 0x2;
    }
}

impl From<Protection> for VhostUserShmemMapMsgFlags {
    fn from(prot: Protection) -> Self {
        let mut flags = Self::EMPTY;
        flags.set(Self::MAP_R, prot.allows(&Protection::read()));
        flags.set(Self::MAP_W, prot.allows(&Protection::write()));
        flags
    }
}

impl From<VhostUserShmemMapMsgFlags> for Protection {
    fn from(flags: VhostUserShmemMapMsgFlags) -> Self {
        let mut prot = Protection::default();
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_R) {
            prot = prot.set_read();
        }
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_W) {
            prot = prot.set_write();
        }
        prot
    }
}

/// Backend request message to map a file into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserShmemMapMsg {
    /// Flags for the mmap operation
    pub flags: VhostUserShmemMapMsgFlags,
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 6],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// File offset.
    pub fd_offset: u64,
    /// Size of region to map.
    pub len: u64,
}

impl VhostUserMsgValidator for VhostUserShmemMapMsg {
    fn is_valid(&self) -> bool {
        // Only known flag bits, and neither offset+len may overflow u64.
        (self.flags.bits() & !VhostUserShmemMapMsgFlags::all().bits()) == 0
            && self.fd_offset.checked_add(self.len).is_some()
            && self.shm_offset.checked_add(self.len).is_some()
    }
}

impl VhostUserShmemMapMsg {
    /// New instance of VhostUserShmemMapMsg struct
    pub fn new(
        shmid: u8,
        shm_offset: u64,
        fd_offset: u64,
        len: u64,
        flags: VhostUserShmemMapMsgFlags,
    ) -> Self {
        Self {
            flags,
            shmid,
            padding: [0; 6],
            shm_offset,
            fd_offset,
            len,
        }
    }
}

/// Backend request message to map GPU memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserGpuMapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to map.
    pub len: u64,
    /// Index of the memory type.
    pub memory_idx: u32,
    /// Type of share handle.
    pub handle_type: u32,
    /// Device UUID
    pub device_uuid: [u8; 16],
    /// Driver UUID
    pub driver_uuid: [u8; 16],
}

impl VhostUserMsgValidator for VhostUserGpuMapMsg {
    fn is_valid(&self) -> bool {
        self.len > 0
    }
}

impl VhostUserGpuMapMsg {
    /// New instance of VhostUserGpuMapMsg struct
    pub fn new(
        shmid: u8,
        shm_offset: u64,
        len: u64,
        memory_idx: u32,
        handle_type: u32,
        device_uuid: [u8; 16],
        driver_uuid: [u8; 16],
    ) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
            memory_idx,
            handle_type,
            device_uuid,
            driver_uuid,
        }
    }
}

/// Backend request message to map external memory into a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserExternalMapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to map.
    pub len: u64,
    /// Pointer to the memory.
    pub ptr: u64,
}

impl VhostUserMsgValidator for VhostUserExternalMapMsg {
    fn is_valid(&self) -> bool {
        self.len > 0
    }
}

impl VhostUserExternalMapMsg {
    /// New instance of VhostUserExternalMapMsg struct
    pub fn new(shmid: u8, shm_offset: u64, len: u64, ptr: u64) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
            ptr,
        }
    }
}

/// Backend request message to unmap part of a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostUserShmemUnmapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    // Pads `shm_offset` to an 8-byte boundary; zeroed by `new()`.
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to unmap.
    pub len: u64,
}

impl VhostUserMsgValidator for VhostUserShmemUnmapMsg {
    // Valid when the unmap range does not wrap past the end of the u64 space.
    fn is_valid(&self) -> bool {
        self.shm_offset.checked_add(self.len).is_some()
    }
}

impl VhostUserShmemUnmapMsg {
    /// New instance of VhostUserShmemUnmapMsg struct
    pub fn new(shmid: u8, shm_offset: u64, len: u64) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
        }
    }
}

/// Inflight I/O descriptor state for split virtqueues
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStateSplit {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: [u8; 5],
    /// List of last batch of used descriptors, only when batching is used for submitting
    pub next: u16,
    /// Preserve order of fetching available descriptors, only for head descriptor
    pub counter: u64,
}

impl DescStateSplit {
    /// New instance of DescStateSplit struct (all fields zeroed).
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for split virtqueues
#[repr(C, packed)]
pub struct QueueRegionSplit {
    /// Features flags of this region
    pub features: u64,
    /// Version of this region
    pub version: u16,
    /// Number of DescStateSplit entries
    pub desc_num: u16,
    /// List to track last batch of used descriptors
    pub last_batch_head: u16,
    /// Idx value of used ring
    pub used_idx: u16,
    /// Pointer to an array of DescStateSplit entries
    pub desc: u64,
}

impl QueueRegionSplit {
    /// New instance of QueueRegionSplit struct.
    ///
    /// `version` is fixed at 1; `desc_num` is set from `queue_size`; the
    /// tracking fields and the `desc` pointer start at zero.
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionSplit {
            features,
            version: 1,
            desc_num: queue_size,
            last_batch_head: 0,
            used_idx: 0,
            desc: 0,
        }
    }
}

/// Inflight I/O descriptor state for packed virtqueues
#[repr(C, packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStatePacked {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: u8,
    /// Link to next free entry
    pub next: u16,
    /// Link to last entry of descriptor list, only for head
    pub last: u16,
    /// Length of descriptor list, only for head
    pub num: u16,
    /// Preserve order of fetching avail descriptors, only for head
    pub counter: u64,
    /// Buffer ID
    pub id: u16,
    /// Descriptor flags
    pub flags: u16,
    /// Buffer length
    pub len: u32,
    /// Buffer address
    pub addr: u64,
}

impl DescStatePacked {
    /// New instance of DescStatePacked struct (all fields zeroed).
    pub fn new() -> Self {
        Self::default()
    }
}

/// Inflight I/O queue region for packed virtqueues
#[repr(C, packed)]
pub struct QueueRegionPacked {
    /// Features flags of this region
    pub features: u64,
    /// version of this region
    pub version: u16,
    /// size of descriptor state array
    pub desc_num: u16,
    /// head of free DescStatePacked entry list
    pub free_head: u16,
    /// old head of free DescStatePacked entry list
    pub old_free_head: u16,
    /// used idx of descriptor ring
    pub used_idx: u16,
    /// old used idx of descriptor ring
    pub old_used_idx: u16,
    /// device ring wrap counter
    pub used_wrap_counter: u8,
    /// old device ring wrap counter
    pub old_used_wrap_counter: u8,
    /// Padding
    padding: [u8; 7],
    /// Pointer to array tracking state of each descriptor from descriptor ring
    pub desc: u64,
}

impl QueueRegionPacked {
    /// New instance of QueueRegionPacked struct.
    ///
    /// `version` is fixed at 1; `desc_num` is set from `queue_size`; all
    /// tracking fields and the `desc` pointer start at zero.
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionPacked {
            features,
            version: 1,
            desc_num: queue_size,
            free_head: 0,
            old_free_head: 0,
            used_idx: 0,
            old_used_idx: 0,
            used_wrap_counter: 0,
            old_used_wrap_counter: 0,
            padding: [0; 7],
            desc: 0,
        }
    }
}

/// Virtio shared memory descriptor.
#[repr(C, packed)]
#[derive(Default, Copy, Clone, FromBytes, Immutable, IntoBytes, KnownLayout)]
pub struct VhostSharedMemoryRegion {
    /// The shared memory region's shmid.
    pub id: u8,
    /// Padding
    padding: [u8; 7],
    /// The length of the shared memory region.
    pub length: u64,
}

impl VhostSharedMemoryRegion {
    /// New instance of VhostSharedMemoryRegion struct
    pub fn new(id: u8, length: u64) -> Self {
        VhostSharedMemoryRegion {
            id,
            padding: [0; 7],
            length,
        }
    }
}

/// Direction of a device state transfer.
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserTransferDirection {
    /// Save (read) state from the device.
    Save,
    /// Load (write) state into the device.
    Load,
}

/// Migration phase during which a device state transfer occurs.
#[derive(Debug, PartialEq, Eq)]
pub enum VhostUserMigrationPhase {
    /// The device is stopped (the only phase defined here).
    Stopped,
}

#[cfg(test)]
mod tests {
    use super::*;

    // Values outside the valid message-code range must fail conversion.
    #[test]
    fn check_frontend_request_code() {
        FrontendReq::try_from(0).expect_err("invalid value");
        FrontendReq::try_from(46).expect_err("invalid value");
        FrontendReq::try_from(10000).expect_err("invalid value");

        let code = FrontendReq::try_from(FrontendReq::GET_FEATURES as u32).unwrap();
        assert_eq!(code, code.clone());
    }

    #[test]
    fn check_backend_request_code() {
        BackendReq::try_from(0).expect_err("invalid value");
        BackendReq::try_from(14).expect_err("invalid value");
        BackendReq::try_from(10000).expect_err("invalid value");

        let code = BackendReq::try_from(BackendReq::CONFIG_CHANGE_MSG as u32).unwrap();
        assert_eq!(code, code.clone());
    }

    // Exercise every accessor/mutator pair on the message header, plus the
    // version-based validity check.
    #[test]
    fn msg_header_ops() {
        let mut hdr = VhostUserMsgHeader::new(FrontendReq::GET_FEATURES, 0, 0x100);
        assert_eq!(hdr.get_code(), Ok(FrontendReq::GET_FEATURES));
        hdr.set_code(FrontendReq::SET_FEATURES);
        assert_eq!(hdr.get_code(), Ok(FrontendReq::SET_FEATURES));

        assert_eq!(hdr.get_version(), 0x1);

        assert!(!hdr.is_reply());
        hdr.set_reply(true);
        assert!(hdr.is_reply());
        hdr.set_reply(false);

        assert!(!hdr.is_need_reply());
        hdr.set_need_reply(true);
        assert!(hdr.is_need_reply());
        hdr.set_need_reply(false);

        assert_eq!(hdr.get_size(), 0x100);
        hdr.set_size(0x200);
        assert_eq!(hdr.get_size(), 0x200);

        assert!(!hdr.is_need_reply());
        assert!(!hdr.is_reply());
        assert_eq!(hdr.get_version(), 0x1);

        // Check version: only version 1 is valid.
        hdr.set_version(0x0);
        assert!(!hdr.is_valid());
        hdr.set_version(0x2);
        assert!(!hdr.is_valid());
        hdr.set_version(0x1);
        assert!(hdr.is_valid());

        // Test Debug, Clone, PartialEq traits.
        assert_eq!(hdr, hdr.clone());
        assert_eq!(hdr.clone().get_code(), hdr.get_code());
        assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr));
    }

    #[test]
    fn test_vhost_user_message_u64() {
        let val = VhostUserU64::default();
        let val1 = VhostUserU64::new(0);

        // Copy fields to locals before use: taking references into a
        // #[repr(packed)] struct would be unaligned.
        let a = val.value;
        let b = val1.value;
        assert_eq!(a, b);
        let a = VhostUserU64::new(1).value;
        assert_eq!(a, 1);
    }

    // Region count must stay within MAX_ATTACHED_FD_ENTRIES and padding must
    // be zero for the message to validate.
    #[test]
    fn check_user_memory() {
        let mut msg = VhostUserMemory::new(1);
        assert!(msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        assert!(msg.is_valid());

        msg.num_regions += 1;
        assert!(!msg.is_valid());
        msg.num_regions = 0xFFFFFFFF;
        assert!(!msg.is_valid());
        msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32;
        msg.padding1 = 1;
        assert!(!msg.is_valid());
    }

    // Region validity: non-zero size and no u64 overflow of addr + size.
    #[test]
    fn check_user_memory_region() {
        let mut msg = VhostUserMemoryRegion {
            guest_phys_addr: 0,
            memory_size: 0x1000,
            user_addr: 0,
            mmap_offset: 0,
        };
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF;
        assert!(msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFFF000;
        assert!(!msg.is_valid());
        msg.guest_phys_addr = 0xFFFFFFFFFFFF0000;
        msg.memory_size = 0;
        assert!(!msg.is_valid());
        // Copy packed fields to locals to avoid unaligned references.
        let a = msg.guest_phys_addr;
        let b = msg.guest_phys_addr;
        assert_eq!(a, b);

        let msg = VhostUserMemoryRegion::default();
        let a = msg.guest_phys_addr;
        assert_eq!(a, 0);
        let a = msg.memory_size;
        assert_eq!(a, 0);
        let a = msg.user_addr;
        assert_eq!(a, 0);
        let a = msg.mmap_offset;
        assert_eq!(a, 0);
    }

    #[test]
    fn test_vhost_user_state() {
        let state = VhostUserVringState::new(5, 8);

        let a = state.index;
        assert_eq!(a, 5);
        let a = state.num;
        assert_eq!(a, 8);
        assert!(state.is_valid());

        let state = VhostUserVringState::default();
        let a = state.index;
        assert_eq!(a, 0);
        let a = state.num;
        assert_eq!(a, 0);
        assert!(state.is_valid());
    }

    // Vring addresses must be cache-line aligned for the message to validate.
    #[test]
    fn test_vhost_user_addr() {
        let mut addr = VhostUserVringAddr::new(
            2,
            VhostUserVringAddrFlags::VHOST_VRING_F_LOG,
            0x1000,
            0x2000,
            0x3000,
            0x4000,
        );

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());

        addr.descriptor = 0x1001;
        assert!(!addr.is_valid());
        addr.descriptor = 0x1000;

        addr.available = 0x3001;
        assert!(!addr.is_valid());
        addr.available = 0x3000;

        addr.used = 0x2001;
        assert!(!addr.is_valid());
        addr.used = 0x2000;
        assert!(addr.is_valid());
    }

    // Building a VhostUserVringAddr from VringConfigData must carry every
    // field across, including the optional log address.
    #[test]
    fn test_vhost_user_state_from_config() {
        let config = VringConfigData {
            queue_size: 128,
            flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits(),
            desc_table_addr: 0x1000,
            used_ring_addr: 0x2000,
            avail_ring_addr: 0x3000,
            log_addr: Some(0x4000),
        };
        let addr = VhostUserVringAddr::from_config_data(2, &config);

        let a = addr.index;
        assert_eq!(a, 2);
        let a = addr.flags;
        assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits());
        let a = addr.descriptor;
        assert_eq!(a, 0x1000);
        let a = addr.used;
        assert_eq!(a, 0x2000);
        let a = addr.available;
        assert_eq!(a, 0x3000);
        let a = addr.log;
        assert_eq!(a, 0x4000);
        assert!(addr.is_valid());
    }

    // Misaligned addresses and unknown flag bits must invalidate the message.
    #[test]
    fn check_user_vring_addr() {
        let mut msg =
            VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0);
        assert!(msg.is_valid());

        msg.descriptor = 1;
        assert!(!msg.is_valid());
        msg.descriptor = 0;

        msg.available = 1;
        assert!(!msg.is_valid());
        msg.available = 0;

        msg.used = 1;
        assert!(!msg.is_valid());
        msg.used = 0;

        msg.flags |= 0x80000000;
        assert!(!msg.is_valid());
        msg.flags &= !0x80000000;
    }

    // Config access must stay within the device config window and use only
    // known flag bits.
    #[test]
    fn check_user_config_msg() {
        let mut msg =
            VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, VhostUserConfigFlags::WRITABLE);

        assert!(msg.is_valid());
        msg.size = 0;
        assert!(!msg.is_valid());
        msg.size = 1;
        assert!(msg.is_valid());
        msg.offset = u32::MAX;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE;
        assert!(!msg.is_valid());
        msg.offset = VHOST_USER_CONFIG_SIZE - 1;
        assert!(msg.is_valid());
        msg.size = 2;
        assert!(!msg.is_valid());
        msg.size = 1;
        msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits();
        assert!(msg.is_valid());
        msg.flags |= 0x4;
        assert!(!msg.is_valid());
    }
}