// Copyright (C) 2019 Alibaba Cloud Computing. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

//! Define communication messages for the vhost-user protocol.
//!
//! For message definition, please refer to the [vhost-user spec](https://github.com/qemu/qemu/blob/f7526eece29cd2e36a63b6703508b24453095eb8/docs/interop/vhost-user.txt).

#![allow(dead_code)]
#![allow(non_camel_case_types)]
#![allow(clippy::upper_case_acronyms)]

use std::convert::TryInto;
use std::fmt::Debug;
use std::marker::PhantomData;

use base::Protection;
use bitflags::bitflags;
use data_model::DataInit;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::VringConfigData;

/// The vhost-user specification uses a field of u32 to store message length.
/// On the other hand, preallocated buffers are needed to receive messages from the Unix domain
/// socket. Preallocating a 4GB buffer for each vhost-user message would be pure overhead.
/// Among all defined vhost-user messages, only VhostUserConfig and VhostUserMemory have variable
/// message size. For VhostUserConfig, a maximum size of 4K is enough because the user
/// configuration space for virtio devices is (4K - 0x100) bytes at most. For VhostUserMemory,
/// 4K should be enough too because it can support 255 memory regions at most.
pub const MAX_MSG_SIZE: usize = 0x1000;

/// The VhostUserMemory message has variable message size and a variable number of attached file
/// descriptors. Each user memory region entry in the message payload occupies 32 bytes,
/// so the maximum number of attached file descriptors is derived from the maximum message size.
/// But rust only implements Default and AsMut traits for arrays with 0 - 32 entries, so further
/// reduce the maximum number...
// pub const MAX_ATTACHED_FD_ENTRIES: usize = (MAX_MSG_SIZE - 8) / 32;
pub const MAX_ATTACHED_FD_ENTRIES: usize = 32;

/// Starting position (inclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_OFFSET: u32 = 0x100;

/// Ending position (exclusive) of the device configuration space in virtio devices.
pub const VHOST_USER_CONFIG_SIZE: u32 = 0x1000;

/// Maximum number of vrings supported.
pub const VHOST_USER_MAX_VRINGS: u64 = 0x8000u64;

/// Trait for the request-code field carried in a vhost-user message header.
/// Implemented by both master-to-slave and slave-to-master request enums.
pub trait Req:
    Clone + Copy + Debug + PartialEq + Eq + PartialOrd + Ord + Into<u32> + Send + Sync
{
    /// Returns true when the request code is within the valid (non-NOOP, below-MAX) range.
    fn is_valid(&self) -> bool;
}

/// Type of requests sent from masters to slaves.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum MasterReq {
    /// Null operation.
    NOOP = 0,
    /// Get from the underlying vhost implementation the features bit mask.
    GET_FEATURES = 1,
    /// Enable features in the underlying vhost implementation using a bit mask.
    SET_FEATURES = 2,
    /// Set the current Master as an owner of the session.
    SET_OWNER = 3,
    /// No longer used.
    RESET_OWNER = 4,
    /// Set the memory map regions on the slave so it can translate the vring addresses.
    SET_MEM_TABLE = 5,
    /// Set logging shared memory space.
    SET_LOG_BASE = 6,
    /// Set the logging file descriptor, which is passed as ancillary data.
    SET_LOG_FD = 7,
    /// Set the size of the queue.
    SET_VRING_NUM = 8,
    /// Set the addresses of the different aspects of the vring.
    SET_VRING_ADDR = 9,
    /// Set the base offset in the available vring.
    SET_VRING_BASE = 10,
    /// Get the available vring base offset.
    GET_VRING_BASE = 11,
    /// Set the event file descriptor for adding buffers to the vring.
    SET_VRING_KICK = 12,
    /// Set the event file descriptor to signal when buffers are used.
    SET_VRING_CALL = 13,
    /// Set the event file descriptor to signal when error occurs.
    SET_VRING_ERR = 14,
    /// Get the protocol feature bit mask from the underlying vhost implementation.
    GET_PROTOCOL_FEATURES = 15,
    /// Enable protocol features in the underlying vhost implementation.
    SET_PROTOCOL_FEATURES = 16,
    /// Query how many queues the backend supports.
    GET_QUEUE_NUM = 17,
    /// Signal slave to enable or disable corresponding vring.
    SET_VRING_ENABLE = 18,
    /// Ask vhost user backend to broadcast a fake RARP to notify the migration is terminated
    /// for guest that does not support GUEST_ANNOUNCE.
    SEND_RARP = 19,
    /// Set host MTU value exposed to the guest.
    NET_SET_MTU = 20,
    /// Set the socket file descriptor for slave initiated requests.
    SET_SLAVE_REQ_FD = 21,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 22,
    /// Set the endianness of a VQ for legacy devices.
    SET_VRING_ENDIAN = 23,
    /// Fetch the contents of the virtio device configuration space.
    GET_CONFIG = 24,
    /// Change the contents of the virtio device configuration space.
    SET_CONFIG = 25,
    /// Create a session for crypto operation.
    CREATE_CRYPTO_SESSION = 26,
    /// Close a session for crypto operation.
    CLOSE_CRYPTO_SESSION = 27,
    /// Advise slave that a migration with postcopy enabled is underway.
    POSTCOPY_ADVISE = 28,
    /// Advise slave that a transition to postcopy mode has happened.
    POSTCOPY_LISTEN = 29,
    /// Advise that postcopy migration has now completed.
    POSTCOPY_END = 30,
    /// Get a shared buffer from slave.
    GET_INFLIGHT_FD = 31,
    /// Send the shared inflight buffer back to slave.
    SET_INFLIGHT_FD = 32,
    /// Sets the GPU protocol socket file descriptor.
    GPU_SET_SOCKET = 33,
    /// Ask the vhost user backend to disable all rings and reset all internal
    /// device state to the initial state.
    RESET_DEVICE = 34,
    /// Indicate that a buffer was added to the vring instead of signalling it
    /// using the vring’s kick file descriptor.
    VRING_KICK = 35,
    /// Return a u64 payload containing the maximum number of memory slots.
    GET_MAX_MEM_SLOTS = 36,
    /// Update the memory tables by adding the region described.
    ADD_MEM_REG = 37,
    /// Update the memory tables by removing the region described.
    REM_MEM_REG = 38,
    /// Notify the backend with updated device status as defined in the VIRTIO
    /// specification.
    SET_STATUS = 39,
    /// Query the backend for its device status as defined in the VIRTIO
    /// specification.
    GET_STATUS = 40,
    /// Get a list of the device's shared memory regions.
    GET_SHARED_MEMORY_REGIONS = 41,
    /// Upper bound of valid commands.
    MAX_CMD = 42,
}

impl From<MasterReq> for u32 {
    fn from(req: MasterReq) -> u32 {
        req as u32
    }
}

impl Req for MasterReq {
    fn is_valid(&self) -> bool {
        // NOOP and MAX_CMD are sentinels, not real requests.
        (*self > MasterReq::NOOP) && (*self < MasterReq::MAX_CMD)
    }
}

/// Type of requests sent from slaves to masters.
#[repr(u32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum SlaveReq {
    /// Null operation.
    NOOP = 0,
    /// Send IOTLB messages with struct vhost_iotlb_msg as payload.
    IOTLB_MSG = 1,
    /// Notify that the virtio device's configuration space has changed.
    CONFIG_CHANGE_MSG = 2,
    /// Set host notifier for a specified queue.
    VRING_HOST_NOTIFIER_MSG = 3,
    /// Indicate that a buffer was used from the vring.
    VRING_CALL = 4,
    /// Indicate that an error occurred on the specific vring.
    VRING_ERR = 5,
    /// Indicates a request to map a fd into a shared memory region.
    SHMEM_MAP = 6,
    /// Indicates a request to unmap part of a shared memory region.
    SHMEM_UNMAP = 7,
    /// Virtio-fs draft: map file content into the window.
    FS_MAP = 8,
    /// Virtio-fs draft: unmap file content from the window.
    FS_UNMAP = 9,
    /// Virtio-fs draft: sync file content.
    FS_SYNC = 10,
    /// Virtio-fs draft: perform a read/write from an fd directly to GPA.
    FS_IO = 11,
    /// Indicates a request to map GPU memory into a shared memory region.
    GPU_MAP = 12,
    /// Upper bound of valid commands.
    MAX_CMD = 13,
}

impl From<SlaveReq> for u32 {
    fn from(req: SlaveReq) -> u32 {
        req as u32
    }
}

impl Req for SlaveReq {
    fn is_valid(&self) -> bool {
        // NOOP and MAX_CMD are sentinels, not real requests.
        (*self > SlaveReq::NOOP) && (*self < SlaveReq::MAX_CMD)
    }
}

/// Vhost message Validator.
pub trait VhostUserMsgValidator {
    /// Validate message syntax only.
    /// It doesn't validate message semantics such as protocol version number and dependency
    /// on feature flags etc.
    fn is_valid(&self) -> bool {
        true
    }
}

// Bit mask for common message flags.
bitflags! {
    /// Common message flags for vhost-user requests and replies.
    pub struct VhostUserHeaderFlag: u32 {
        /// Bits[0..2] is message version number.
        const VERSION = 0x3;
        /// Mark message as reply.
        const REPLY = 0x4;
        /// Sender anticipates a reply message from the peer.
        const NEED_REPLY = 0x8;
        /// All valid bits.
        const ALL_FLAGS = 0xc;
        /// All reserved bits.
        const RESERVED_BITS = !0xf;
    }
}

/// Common message header for vhost-user requests and replies.
/// A vhost-user message consists of 3 header fields and an optional payload. All numbers are in the
/// machine native byte order.
243 #[repr(packed)] 244 #[derive(Copy, FromBytes)] 245 pub struct VhostUserMsgHeader<R: Req> { 246 request: u32, 247 flags: u32, 248 size: u32, 249 _r: PhantomData<R>, 250 } 251 // Safe because it only has data and has no implicit padding. 252 unsafe impl<R: Req> DataInit for VhostUserMsgHeader<R> {} 253 254 impl<R: Req> Debug for VhostUserMsgHeader<R> { fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result255 fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { 256 f.debug_struct("Point") 257 .field("request", &{ self.request }) 258 .field("flags", &{ self.flags }) 259 .field("size", &{ self.size }) 260 .finish() 261 } 262 } 263 264 impl<R: Req> Clone for VhostUserMsgHeader<R> { clone(&self) -> VhostUserMsgHeader<R>265 fn clone(&self) -> VhostUserMsgHeader<R> { 266 *self 267 } 268 } 269 270 impl<R: Req> PartialEq for VhostUserMsgHeader<R> { eq(&self, other: &Self) -> bool271 fn eq(&self, other: &Self) -> bool { 272 self.request == other.request && self.flags == other.flags && self.size == other.size 273 } 274 } 275 276 impl<R: Req> VhostUserMsgHeader<R> { 277 /// Create a new instance of `VhostUserMsgHeader`. new(request: R, flags: u32, size: u32) -> Self278 pub fn new(request: R, flags: u32, size: u32) -> Self { 279 // Default to protocol version 1 280 let fl = (flags & VhostUserHeaderFlag::ALL_FLAGS.bits()) | 0x1; 281 VhostUserMsgHeader { 282 request: request.into(), 283 flags: fl, 284 size, 285 _r: PhantomData, 286 } 287 } 288 289 /// Get message type. get_code(&self) -> R290 pub fn get_code(&self) -> R { 291 // It's safe because R is marked as repr(u32). 292 unsafe { std::mem::transmute_copy::<u32, R>(&{ self.request }) } 293 } 294 295 /// Set message type. set_code(&mut self, request: R)296 pub fn set_code(&mut self, request: R) { 297 self.request = request.into(); 298 } 299 300 /// Get message version number. 
get_version(&self) -> u32301 pub fn get_version(&self) -> u32 { 302 self.flags & 0x3 303 } 304 305 /// Set message version number. set_version(&mut self, ver: u32)306 pub fn set_version(&mut self, ver: u32) { 307 self.flags &= !0x3; 308 self.flags |= ver & 0x3; 309 } 310 311 /// Check whether it's a reply message. is_reply(&self) -> bool312 pub fn is_reply(&self) -> bool { 313 (self.flags & VhostUserHeaderFlag::REPLY.bits()) != 0 314 } 315 316 /// Mark message as reply. set_reply(&mut self, is_reply: bool)317 pub fn set_reply(&mut self, is_reply: bool) { 318 if is_reply { 319 self.flags |= VhostUserHeaderFlag::REPLY.bits(); 320 } else { 321 self.flags &= !VhostUserHeaderFlag::REPLY.bits(); 322 } 323 } 324 325 /// Check whether reply for this message is requested. is_need_reply(&self) -> bool326 pub fn is_need_reply(&self) -> bool { 327 (self.flags & VhostUserHeaderFlag::NEED_REPLY.bits()) != 0 328 } 329 330 /// Mark that reply for this message is needed. set_need_reply(&mut self, need_reply: bool)331 pub fn set_need_reply(&mut self, need_reply: bool) { 332 if need_reply { 333 self.flags |= VhostUserHeaderFlag::NEED_REPLY.bits(); 334 } else { 335 self.flags &= !VhostUserHeaderFlag::NEED_REPLY.bits(); 336 } 337 } 338 339 /// Check whether it's the reply message for the request `req`. is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool340 pub fn is_reply_for(&self, req: &VhostUserMsgHeader<R>) -> bool { 341 self.is_reply() && !req.is_reply() && self.get_code() == req.get_code() 342 } 343 344 /// Get message size. get_size(&self) -> u32345 pub fn get_size(&self) -> u32 { 346 self.size 347 } 348 349 /// Set message size. 
set_size(&mut self, size: u32)350 pub fn set_size(&mut self, size: u32) { 351 self.size = size; 352 } 353 } 354 355 impl<R: Req> Default for VhostUserMsgHeader<R> { default() -> Self356 fn default() -> Self { 357 VhostUserMsgHeader { 358 request: 0, 359 flags: 0x1, 360 size: 0, 361 _r: PhantomData, 362 } 363 } 364 } 365 366 impl From<[u8; 12]> for VhostUserMsgHeader<MasterReq> { from(buf: [u8; 12]) -> Self367 fn from(buf: [u8; 12]) -> Self { 368 // Convert 4-length slice into [u8; 4]. This must succeed. 369 let req = u32::from_le_bytes(buf[0..4].try_into().unwrap()); 370 // Safe because `MasterReq` is defined with `#[repr(u32)]`. 371 let req = unsafe { std::mem::transmute_copy::<u32, MasterReq>(&req) }; 372 373 let flags = u32::from_le_bytes(buf[4..8].try_into().unwrap()); 374 let size = u32::from_le_bytes(buf[8..12].try_into().unwrap()); 375 Self::new(req, flags, size) 376 } 377 } 378 379 impl<T: Req> VhostUserMsgValidator for VhostUserMsgHeader<T> { 380 #[allow(clippy::if_same_then_else)] is_valid(&self) -> bool381 fn is_valid(&self) -> bool { 382 if !self.get_code().is_valid() { 383 return false; 384 } else if self.size as usize > MAX_MSG_SIZE { 385 return false; 386 } else if self.get_version() != 0x1 { 387 return false; 388 } else if (self.flags & VhostUserHeaderFlag::RESERVED_BITS.bits()) != 0 { 389 return false; 390 } 391 true 392 } 393 } 394 395 // Bit mask for transport specific flags in VirtIO feature set defined by vhost-user. 396 bitflags! { 397 /// Transport specific flags in VirtIO feature set defined by vhost-user. 398 pub struct VhostUserVirtioFeatures: u64 { 399 /// Feature flag for the protocol feature. 400 const PROTOCOL_FEATURES = 0x4000_0000; 401 } 402 } 403 404 // Bit mask for vhost-user protocol feature flags. 405 bitflags! { 406 /// Vhost-user protocol feature flags. 407 pub struct VhostUserProtocolFeatures: u64 { 408 /// Support multiple queues. 409 const MQ = 0x0000_0001; 410 /// Support logging through shared memory fd. 
411 const LOG_SHMFD = 0x0000_0002; 412 /// Support broadcasting fake RARP packet. 413 const RARP = 0x0000_0004; 414 /// Support sending reply messages for requests with NEED_REPLY flag set. 415 const REPLY_ACK = 0x0000_0008; 416 /// Support setting MTU for virtio-net devices. 417 const MTU = 0x0000_0010; 418 /// Allow the slave to send requests to the master by an optional communication channel. 419 const SLAVE_REQ = 0x0000_0020; 420 /// Support setting slave endian by SET_VRING_ENDIAN. 421 const CROSS_ENDIAN = 0x0000_0040; 422 /// Support crypto operations. 423 const CRYPTO_SESSION = 0x0000_0080; 424 /// Support sending userfault_fd from slaves to masters. 425 const PAGEFAULT = 0x0000_0100; 426 /// Support Virtio device configuration. 427 const CONFIG = 0x0000_0200; 428 /// Allow the slave to send fds (at most 8 descriptors in each message) to the master. 429 const SLAVE_SEND_FD = 0x0000_0400; 430 /// Allow the slave to register a host notifier. 431 const HOST_NOTIFIER = 0x0000_0800; 432 /// Support inflight shmfd. 433 const INFLIGHT_SHMFD = 0x0000_1000; 434 /// Support resetting the device. 435 const RESET_DEVICE = 0x0000_2000; 436 /// Support inband notifications. 437 const INBAND_NOTIFICATIONS = 0x0000_4000; 438 /// Support configuring memory slots. 439 const CONFIGURE_MEM_SLOTS = 0x0000_8000; 440 /// Support reporting status. 441 const STATUS = 0x0001_0000; 442 /// Support shared memory regions. 443 const SHARED_MEMORY_REGIONS = 0x0002_0000; 444 } 445 } 446 447 /// A generic message to encapsulate a 64-bit value. 448 #[repr(packed)] 449 #[derive(Default, Clone, Copy)] 450 pub struct VhostUserU64 { 451 /// The encapsulated 64-bit common value. 452 pub value: u64, 453 } 454 // Safe because it only has data and has no implicit padding. 455 unsafe impl DataInit for VhostUserU64 {} 456 457 impl VhostUserU64 { 458 /// Create a new instance. 
new(value: u64) -> Self459 pub fn new(value: u64) -> Self { 460 VhostUserU64 { value } 461 } 462 } 463 464 impl VhostUserMsgValidator for VhostUserU64 {} 465 466 /// A generic message for empty message. 467 #[derive(Default, Clone, Copy)] 468 pub struct VhostUserEmptyMessage; 469 470 // Safe because it has no data 471 unsafe impl DataInit for VhostUserEmptyMessage {} 472 473 impl VhostUserMsgValidator for VhostUserEmptyMessage {} 474 475 /// Memory region descriptor for the SET_MEM_TABLE request. 476 #[repr(packed)] 477 #[derive(Default, Clone, Copy)] 478 pub struct VhostUserMemory { 479 /// Number of memory regions in the payload. 480 pub num_regions: u32, 481 /// Padding for alignment. 482 pub padding1: u32, 483 } 484 // Safe because it only has data and has no implicit padding. 485 unsafe impl DataInit for VhostUserMemory {} 486 487 impl VhostUserMemory { 488 /// Create a new instance. new(cnt: u32) -> Self489 pub fn new(cnt: u32) -> Self { 490 VhostUserMemory { 491 num_regions: cnt, 492 padding1: 0, 493 } 494 } 495 } 496 497 impl VhostUserMsgValidator for VhostUserMemory { 498 #[allow(clippy::if_same_then_else)] is_valid(&self) -> bool499 fn is_valid(&self) -> bool { 500 if self.padding1 != 0 { 501 return false; 502 } else if self.num_regions == 0 || self.num_regions > MAX_ATTACHED_FD_ENTRIES as u32 { 503 return false; 504 } 505 true 506 } 507 } 508 509 /// Memory region descriptors as payload for the SET_MEM_TABLE request. 510 #[repr(packed)] 511 #[derive(Default, Clone, Copy)] 512 pub struct VhostUserMemoryRegion { 513 /// Guest physical address of the memory region. 514 pub guest_phys_addr: u64, 515 /// Size of the memory region. 516 pub memory_size: u64, 517 /// Virtual address in the current process. 518 pub user_addr: u64, 519 /// Offset where region starts in the mapped memory. 520 pub mmap_offset: u64, 521 } 522 // Safe because it only has data and has no implicit padding. 
unsafe impl DataInit for VhostUserMemoryRegion {}

impl VhostUserMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserMemoryRegion {
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserMemoryRegion {
    fn is_valid(&self) -> bool {
        // Reject empty regions and any base + size combination that overflows u64.
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}

/// Payload of the VhostUserMemory message.
pub type VhostUserMemoryPayload = Vec<VhostUserMemoryRegion>;

/// Single memory region descriptor as payload for ADD_MEM_REG and REM_MEM_REG
/// requests.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserSingleMemoryRegion {
    /// Padding for correct alignment
    padding: u64,
    /// Guest physical address of the memory region.
    pub guest_phys_addr: u64,
    /// Size of the memory region.
    pub memory_size: u64,
    /// Virtual address in the current process.
    pub user_addr: u64,
    /// Offset where region starts in the mapped memory.
    pub mmap_offset: u64,
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserSingleMemoryRegion {}

impl VhostUserSingleMemoryRegion {
    /// Create a new instance.
    pub fn new(guest_phys_addr: u64, memory_size: u64, user_addr: u64, mmap_offset: u64) -> Self {
        VhostUserSingleMemoryRegion {
            padding: 0,
            guest_phys_addr,
            memory_size,
            user_addr,
            mmap_offset,
        }
    }
}

impl VhostUserMsgValidator for VhostUserSingleMemoryRegion {
    fn is_valid(&self) -> bool {
        // Same checks as VhostUserMemoryRegion: non-empty and no u64 overflow.
        if self.memory_size == 0
            || self.guest_phys_addr.checked_add(self.memory_size).is_none()
            || self.user_addr.checked_add(self.memory_size).is_none()
            || self.mmap_offset.checked_add(self.memory_size).is_none()
        {
            return false;
        }
        true
    }
}

/// Vring state descriptor.
#[repr(packed)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserVringState {
    /// Vring index.
    pub index: u32,
    /// A common 32bit value to encapsulate vring state etc.
    pub num: u32,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserVringState {}

impl VhostUserVringState {
    /// Create a new instance.
    pub fn new(index: u32, num: u32) -> Self {
        VhostUserVringState { index, num }
    }
}

impl VhostUserMsgValidator for VhostUserVringState {}

// Bit mask for vring address flags.
bitflags! {
    /// Flags for vring address.
    pub struct VhostUserVringAddrFlags: u32 {
        /// Support log of vring operations.
        /// Modifications to "used" vring should be logged.
        const VHOST_VRING_F_LOG = 0x1;
    }
}

/// Vring address descriptor.
#[repr(packed)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserVringAddr {
    /// Vring index.
    pub index: u32,
    /// Vring flags defined by VhostUserVringAddrFlags.
    pub flags: u32,
    /// Ring address of the vring descriptor table.
    pub descriptor: u64,
    /// Ring address of the vring used ring.
    pub used: u64,
    /// Ring address of the vring available ring.
    pub available: u64,
    /// Guest address for logging.
    pub log: u64,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserVringAddr {}

impl VhostUserVringAddr {
    /// Create a new instance.
    pub fn new(
        index: u32,
        flags: VhostUserVringAddrFlags,
        descriptor: u64,
        used: u64,
        available: u64,
        log: u64,
    ) -> Self {
        VhostUserVringAddr {
            index,
            flags: flags.bits(),
            descriptor,
            used,
            available,
            log,
        }
    }

    /// Create a new instance from `VringConfigData`.
    pub fn from_config_data(index: u32, config_data: &VringConfigData) -> Self {
        // A missing log address is encoded as 0.
        let log_addr = config_data.log_addr.unwrap_or(0);
        VhostUserVringAddr {
            index,
            flags: config_data.flags,
            descriptor: config_data.desc_table_addr,
            used: config_data.used_ring_addr,
            available: config_data.avail_ring_addr,
            log: log_addr,
        }
    }
}

impl VhostUserMsgValidator for VhostUserVringAddr {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // Reject unknown flag bits and misaligned ring addresses: the descriptor
        // table must be 16-byte aligned, the available ring 2-byte aligned, and
        // the used ring 4-byte aligned.
        if (self.flags & !VhostUserVringAddrFlags::all().bits()) != 0 {
            return false;
        } else if self.descriptor & 0xf != 0 {
            return false;
        } else if self.available & 0x1 != 0 {
            return false;
        } else if self.used & 0x3 != 0 {
            return false;
        }
        true
    }
}

// Bit mask for the vhost-user device configuration message.
bitflags! {
    /// Flags for the device configuration message.
    pub struct VhostUserConfigFlags: u32 {
        /// Vhost master messages used for writeable fields.
        const WRITABLE = 0x1;
        /// Vhost master messages used for live migration.
        const LIVE_MIGRATION = 0x2;
    }
}

/// Message to read/write device configuration space.
#[repr(packed)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserConfig {
    /// Offset of virtio device's configuration space.
    pub offset: u32,
    /// Configuration space access size in bytes.
    pub size: u32,
    /// Flags for the device configuration operation.
    pub flags: u32,
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserConfig {}

impl VhostUserConfig {
    /// Create a new instance.
    pub fn new(offset: u32, size: u32, flags: VhostUserConfigFlags) -> Self {
        VhostUserConfig {
            offset,
            size,
            flags: flags.bits(),
        }
    }
}

impl VhostUserMsgValidator for VhostUserConfig {
    #[allow(clippy::if_same_then_else)]
    fn is_valid(&self) -> bool {
        // Reject accesses that overflow u32 or extend beyond the config space.
        let end_addr = match self.size.checked_add(self.offset) {
            Some(addr) => addr,
            None => return false,
        };
        if (self.flags & !VhostUserConfigFlags::all().bits()) != 0 {
            return false;
        } else if self.size == 0 || end_addr > VHOST_USER_CONFIG_SIZE {
            return false;
        }
        true
    }
}

/// Payload for the VhostUserConfig message.
pub type VhostUserConfigPayload = Vec<u8>;

/// Inflight I/O tracking area descriptor as payload for GET_INFLIGHT_FD and
/// SET_INFLIGHT_FD requests.
#[repr(C)]
#[derive(Default, Clone, Copy)]
pub struct VhostUserInflight {
    /// Size of the area to track inflight I/O.
    pub mmap_size: u64,
    /// Offset of this area from the start of the supplied file descriptor.
    pub mmap_offset: u64,
    /// Number of virtqueues.
    pub num_queues: u16,
    /// Size of virtqueues.
    pub queue_size: u16,
}

// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserInflight {}

impl VhostUserInflight {
    /// Create a new instance.
    pub fn new(mmap_size: u64, mmap_offset: u64, num_queues: u16, queue_size: u16) -> Self {
        VhostUserInflight {
            mmap_size,
            mmap_offset,
            num_queues,
            queue_size,
        }
    }
}

impl VhostUserMsgValidator for VhostUserInflight {
    fn is_valid(&self) -> bool {
        if self.num_queues == 0 || self.queue_size == 0 {
            return false;
        }
        true
    }
}

/*
 * TODO: support dirty log, live migration and IOTLB operations.
#[repr(packed)]
pub struct VhostUserVringArea {
    pub index: u32,
    pub flags: u32,
    pub size: u64,
    pub offset: u64,
}

#[repr(packed)]
pub struct VhostUserLog {
    pub size: u64,
    pub offset: u64,
}

#[repr(packed)]
pub struct VhostUserIotlb {
    pub iova: u64,
    pub size: u64,
    pub user_addr: u64,
    pub permission: u8,
    pub optype: u8,
}
*/

// Bit mask for flags in virtio-fs slave messages
bitflags! {
    #[derive(Default)]
    /// Flags for virtio-fs slave messages.
    pub struct VhostUserFSSlaveMsgFlags: u64 {
        /// Empty permission.
        const EMPTY = 0x0;
        /// Read permission.
        const MAP_R = 0x1;
        /// Write permission.
        const MAP_W = 0x2;
    }
}

/// Max entries in one virtio-fs slave request.
pub const VHOST_USER_FS_SLAVE_ENTRIES: usize = 8;

/// Slave request message to update the MMIO window.
#[repr(packed)]
#[derive(Default, Copy, Clone)]
pub struct VhostUserFSSlaveMsg {
    /// File offset.
    pub fd_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Offset into the DAX window.
    pub cache_offset: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Size of region to map.
    pub len: [u64; VHOST_USER_FS_SLAVE_ENTRIES],
    /// Flags for the mmap operation
    pub flags: [VhostUserFSSlaveMsgFlags; VHOST_USER_FS_SLAVE_ENTRIES],
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserFSSlaveMsg {}

impl VhostUserMsgValidator for VhostUserFSSlaveMsg {
    fn is_valid(&self) -> bool {
        // Every entry must use only known flag bits and must not overflow u64
        // when its length is added to either offset.
        for i in 0..VHOST_USER_FS_SLAVE_ENTRIES {
            if ({ self.flags[i] }.bits() & !VhostUserFSSlaveMsgFlags::all().bits()) != 0
                || self.fd_offset[i].checked_add(self.len[i]).is_none()
                || self.cache_offset[i].checked_add(self.len[i]).is_none()
            {
                return false;
            }
        }
        true
    }
}

bitflags! {
    #[derive(Default)]
    /// Flags for SHMEM_MAP messages.
    pub struct VhostUserShmemMapMsgFlags: u8 {
        /// Empty permission.
        const EMPTY = 0x0;
        /// Read permission.
        const MAP_R = 0x1;
        /// Write permission.
        const MAP_W = 0x2;
    }
}

impl From<Protection> for VhostUserShmemMapMsgFlags {
    fn from(prot: Protection) -> Self {
        let mut flags = Self::EMPTY;
        flags.set(Self::MAP_R, prot.allows(&Protection::read()));
        flags.set(Self::MAP_W, prot.allows(&Protection::write()));
        flags
    }
}

impl From<VhostUserShmemMapMsgFlags> for Protection {
    fn from(flags: VhostUserShmemMapMsgFlags) -> Self {
        let mut prot = Protection::from(0);
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_R) {
            prot = prot.set_read();
        }
        if flags.contains(VhostUserShmemMapMsgFlags::MAP_W) {
            prot = prot.set_write();
        }
        prot
    }
}

/// Slave request message to map a file into a shared memory region.
904 #[repr(C, packed)] 905 #[derive(Default, Copy, Clone)] 906 pub struct VhostUserShmemMapMsg { 907 /// Flags for the mmap operation 908 pub flags: VhostUserShmemMapMsgFlags, 909 /// Shared memory region id. 910 pub shmid: u8, 911 padding: [u8; 6], 912 /// Offset into the shared memory region. 913 pub shm_offset: u64, 914 /// File offset. 915 pub fd_offset: u64, 916 /// Size of region to map. 917 pub len: u64, 918 } 919 // Safe because it only has data and has no implicit padding. 920 unsafe impl DataInit for VhostUserShmemMapMsg {} 921 922 impl VhostUserMsgValidator for VhostUserShmemMapMsg { is_valid(&self) -> bool923 fn is_valid(&self) -> bool { 924 (self.flags.bits() & !VhostUserFSSlaveMsgFlags::all().bits() as u8) == 0 925 && self.fd_offset.checked_add(self.len).is_some() 926 && self.shm_offset.checked_add(self.len).is_some() 927 } 928 } 929 930 impl VhostUserShmemMapMsg { 931 /// New instance of VhostUserShmemMapMsg struct new( shmid: u8, shm_offset: u64, fd_offset: u64, len: u64, flags: VhostUserShmemMapMsgFlags, ) -> Self932 pub fn new( 933 shmid: u8, 934 shm_offset: u64, 935 fd_offset: u64, 936 len: u64, 937 flags: VhostUserShmemMapMsgFlags, 938 ) -> Self { 939 Self { 940 flags, 941 shmid, 942 padding: [0; 6], 943 shm_offset, 944 fd_offset, 945 len, 946 } 947 } 948 } 949 950 /// Slave request message to map GPU memory into a shared memory region. 951 #[repr(C, packed)] 952 #[derive(Default, Copy, Clone)] 953 pub struct VhostUserGpuMapMsg { 954 /// Shared memory region id. 955 pub shmid: u8, 956 padding: [u8; 7], 957 /// Offset into the shared memory region. 958 pub shm_offset: u64, 959 /// Size of region to map. 960 pub len: u64, 961 /// Index of the memory type. 962 pub memory_idx: u32, 963 /// Type of share handle. 964 pub handle_type: u32, 965 /// Device UUID 966 pub device_uuid: [u8; 16], 967 /// Driver UUID 968 pub driver_uuid: [u8; 16], 969 } 970 // Safe because it only has data and has no implicit padding. 
unsafe impl DataInit for VhostUserGpuMapMsg {}

impl VhostUserMsgValidator for VhostUserGpuMapMsg {
    fn is_valid(&self) -> bool {
        // A zero-length mapping request is never meaningful.
        self.len != 0
    }
}

impl VhostUserGpuMapMsg {
    /// New instance of VhostUserGpuMapMsg struct
    pub fn new(
        shmid: u8,
        shm_offset: u64,
        len: u64,
        memory_idx: u32,
        handle_type: u32,
        device_uuid: [u8; 16],
        driver_uuid: [u8; 16],
    ) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
            memory_idx,
            handle_type,
            device_uuid,
            driver_uuid,
        }
    }
}

/// Slave request message to unmap part of a shared memory region.
#[repr(C, packed)]
#[derive(Default, Copy, Clone)]
pub struct VhostUserShmemUnmapMsg {
    /// Shared memory region id.
    pub shmid: u8,
    padding: [u8; 7],
    /// Offset into the shared memory region.
    pub shm_offset: u64,
    /// Size of region to unmap.
    pub len: u64,
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for VhostUserShmemUnmapMsg {}

impl VhostUserMsgValidator for VhostUserShmemUnmapMsg {
    fn is_valid(&self) -> bool {
        // The unmapped range must not wrap around the address space.
        self.shm_offset.checked_add(self.len).is_some()
    }
}

impl VhostUserShmemUnmapMsg {
    /// New instance of VhostUserShmemUnmapMsg struct
    pub fn new(shmid: u8, shm_offset: u64, len: u64) -> Self {
        Self {
            shmid,
            padding: [0; 7],
            shm_offset,
            len,
        }
    }
}

/// Inflight I/O descriptor state for split virtqueues
#[repr(packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStateSplit {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: [u8; 5],
    /// List of last batch of used descriptors, only when batching is used for submitting
    pub next: u16,
    /// Preserve order of fetching available descriptors, only for head descriptor
    pub counter: u64,
}

impl DescStateSplit {
    /// New instance of DescStateSplit struct
    pub fn new() -> Self {
        Default::default()
    }
}

/// Inflight I/O queue region for split virtqueues
#[repr(packed)]
pub struct QueueRegionSplit {
    /// Features flags of this region
    pub features: u64,
    /// Version of this region
    pub version: u16,
    /// Number of DescStateSplit entries
    pub desc_num: u16,
    /// List to track last batch of used descriptors
    pub last_batch_head: u16,
    /// Idx value of used ring
    pub used_idx: u16,
    /// Pointer to an array of DescStateSplit entries
    pub desc: u64,
}

impl QueueRegionSplit {
    /// New instance of QueueRegionSplit struct
    pub fn new(features: u64, queue_size: u16) -> Self {
        Self {
            features,
            version: 1,
            desc_num: queue_size,
            last_batch_head: 0,
            used_idx: 0,
            desc: 0,
        }
    }
}

/// Inflight I/O descriptor state for packed virtqueues
#[repr(packed)]
#[derive(Clone, Copy, Default)]
pub struct DescStatePacked {
    /// Indicate whether this descriptor (only head) is inflight or not.
    pub inflight: u8,
    /// Padding
    padding: u8,
    /// Link to next free entry
    pub next: u16,
    /// Link to last entry of descriptor list, only for head
    pub last: u16,
    /// Length of descriptor list, only for head
    pub num: u16,
    /// Preserve order of fetching avail descriptors, only for head
    pub counter: u64,
    /// Buffer ID
    pub id: u16,
    /// Descriptor flags
    pub flags: u16,
    /// Buffer length
    pub len: u32,
    /// Buffer address
    pub addr: u64,
}

impl DescStatePacked {
    /// New instance of DescStatePacked struct
    pub fn new() -> Self {
        Default::default()
    }
}

/// Inflight I/O queue region for packed virtqueues
#[repr(packed)]
pub struct QueueRegionPacked {
    /// Features flags of this region
    pub features: u64,
    /// version of this region
    pub version: u16,
    /// size of descriptor state array
    pub desc_num: u16,
    /// head of free DescStatePacked entry list
    pub free_head: u16,
    /// old head of free DescStatePacked entry list
    pub old_free_head: u16,
    /// used idx of descriptor ring
    pub used_idx: u16,
    /// old used idx of descriptor ring
    pub old_used_idx: u16,
    /// device ring wrap counter
    pub used_wrap_counter: u8,
    /// old device ring wrap counter
    pub old_used_wrap_counter: u8,
    /// Padding
    padding: [u8; 7],
    /// Pointer to array tracking state of each descriptor from descriptor ring
    pub desc: u64,
}

impl QueueRegionPacked {
    /// New instance of QueueRegionPacked struct
    pub fn new(features: u64, queue_size: u16) -> Self {
        QueueRegionPacked {
            features,
            version: 1,
            desc_num: queue_size,
            free_head: 0,
            old_free_head: 0,
            used_idx: 0,
            old_used_idx: 0,
            used_wrap_counter: 0,
            old_used_wrap_counter: 0,
            padding: [0; 7],
desc: 0, 1163 } 1164 } 1165 } 1166 1167 /// Virtio shared memory descriptor. 1168 #[repr(packed)] 1169 #[derive(Default, Copy, Clone, FromBytes, AsBytes)] 1170 pub struct VhostSharedMemoryRegion { 1171 /// The shared memory region's shmid. 1172 pub id: u8, 1173 /// Padding 1174 padding: [u8; 7], 1175 /// The length of the shared memory region. 1176 pub length: u64, 1177 } 1178 1179 impl VhostSharedMemoryRegion { 1180 /// New instance of VhostSharedMemoryRegion struct new(id: u8, length: u64) -> Self1181 pub fn new(id: u8, length: u64) -> Self { 1182 VhostSharedMemoryRegion { 1183 id, 1184 padding: [0; 7], 1185 length, 1186 } 1187 } 1188 } 1189 1190 #[cfg(test)] 1191 mod tests { 1192 use std::mem; 1193 1194 use super::*; 1195 1196 #[test] check_master_request_code()1197 fn check_master_request_code() { 1198 let code = MasterReq::NOOP; 1199 assert!(!code.is_valid()); 1200 let code = MasterReq::MAX_CMD; 1201 assert!(!code.is_valid()); 1202 assert!(code > MasterReq::NOOP); 1203 let code = MasterReq::GET_FEATURES; 1204 assert!(code.is_valid()); 1205 assert_eq!(code, code.clone()); 1206 let code: MasterReq = unsafe { std::mem::transmute::<u32, MasterReq>(10000u32) }; 1207 assert!(!code.is_valid()); 1208 } 1209 1210 #[test] check_slave_request_code()1211 fn check_slave_request_code() { 1212 let code = SlaveReq::NOOP; 1213 assert!(!code.is_valid()); 1214 let code = SlaveReq::MAX_CMD; 1215 assert!(!code.is_valid()); 1216 assert!(code > SlaveReq::NOOP); 1217 let code = SlaveReq::CONFIG_CHANGE_MSG; 1218 assert!(code.is_valid()); 1219 assert_eq!(code, code.clone()); 1220 let code: SlaveReq = unsafe { std::mem::transmute::<u32, SlaveReq>(10000u32) }; 1221 assert!(!code.is_valid()); 1222 } 1223 1224 #[test] msg_header_ops()1225 fn msg_header_ops() { 1226 let mut hdr = VhostUserMsgHeader::new(MasterReq::GET_FEATURES, 0, 0x100); 1227 assert_eq!(hdr.get_code(), MasterReq::GET_FEATURES); 1228 hdr.set_code(MasterReq::SET_FEATURES); 1229 assert_eq!(hdr.get_code(), 
MasterReq::SET_FEATURES); 1230 1231 assert_eq!(hdr.get_version(), 0x1); 1232 1233 assert!(!hdr.is_reply()); 1234 hdr.set_reply(true); 1235 assert!(hdr.is_reply()); 1236 hdr.set_reply(false); 1237 1238 assert!(!hdr.is_need_reply()); 1239 hdr.set_need_reply(true); 1240 assert!(hdr.is_need_reply()); 1241 hdr.set_need_reply(false); 1242 1243 assert_eq!(hdr.get_size(), 0x100); 1244 hdr.set_size(0x200); 1245 assert_eq!(hdr.get_size(), 0x200); 1246 1247 assert!(!hdr.is_need_reply()); 1248 assert!(!hdr.is_reply()); 1249 assert_eq!(hdr.get_version(), 0x1); 1250 1251 // Check message length 1252 assert!(hdr.is_valid()); 1253 hdr.set_size(0x2000); 1254 assert!(!hdr.is_valid()); 1255 hdr.set_size(0x100); 1256 assert_eq!(hdr.get_size(), 0x100); 1257 assert!(hdr.is_valid()); 1258 hdr.set_size((MAX_MSG_SIZE - mem::size_of::<VhostUserMsgHeader<MasterReq>>()) as u32); 1259 assert!(hdr.is_valid()); 1260 hdr.set_size(0x0); 1261 assert!(hdr.is_valid()); 1262 1263 // Check version 1264 hdr.set_version(0x0); 1265 assert!(!hdr.is_valid()); 1266 hdr.set_version(0x2); 1267 assert!(!hdr.is_valid()); 1268 hdr.set_version(0x1); 1269 assert!(hdr.is_valid()); 1270 1271 // Test Debug, Clone, PartiaEq trait 1272 assert_eq!(hdr, hdr.clone()); 1273 assert_eq!(hdr.clone().get_code(), hdr.get_code()); 1274 assert_eq!(format!("{:?}", hdr.clone()), format!("{:?}", hdr)); 1275 } 1276 1277 #[test] test_vhost_user_message_u64()1278 fn test_vhost_user_message_u64() { 1279 let val = VhostUserU64::default(); 1280 let val1 = VhostUserU64::new(0); 1281 1282 let a = val.value; 1283 let b = val1.value; 1284 assert_eq!(a, b); 1285 let a = VhostUserU64::new(1).value; 1286 assert_eq!(a, 1); 1287 } 1288 1289 #[test] check_user_memory()1290 fn check_user_memory() { 1291 let mut msg = VhostUserMemory::new(1); 1292 assert!(msg.is_valid()); 1293 msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32; 1294 assert!(msg.is_valid()); 1295 1296 msg.num_regions += 1; 1297 assert!(!msg.is_valid()); 1298 msg.num_regions = 0xFFFFFFFF; 
1299 assert!(!msg.is_valid()); 1300 msg.num_regions = MAX_ATTACHED_FD_ENTRIES as u32; 1301 msg.padding1 = 1; 1302 assert!(!msg.is_valid()); 1303 } 1304 1305 #[test] check_user_memory_region()1306 fn check_user_memory_region() { 1307 let mut msg = VhostUserMemoryRegion { 1308 guest_phys_addr: 0, 1309 memory_size: 0x1000, 1310 user_addr: 0, 1311 mmap_offset: 0, 1312 }; 1313 assert!(msg.is_valid()); 1314 msg.guest_phys_addr = 0xFFFFFFFFFFFFEFFF; 1315 assert!(msg.is_valid()); 1316 msg.guest_phys_addr = 0xFFFFFFFFFFFFF000; 1317 assert!(!msg.is_valid()); 1318 msg.guest_phys_addr = 0xFFFFFFFFFFFF0000; 1319 msg.memory_size = 0; 1320 assert!(!msg.is_valid()); 1321 let a = msg.guest_phys_addr; 1322 let b = msg.guest_phys_addr; 1323 assert_eq!(a, b); 1324 1325 let msg = VhostUserMemoryRegion::default(); 1326 let a = msg.guest_phys_addr; 1327 assert_eq!(a, 0); 1328 let a = msg.memory_size; 1329 assert_eq!(a, 0); 1330 let a = msg.user_addr; 1331 assert_eq!(a, 0); 1332 let a = msg.mmap_offset; 1333 assert_eq!(a, 0); 1334 } 1335 1336 #[test] test_vhost_user_state()1337 fn test_vhost_user_state() { 1338 let state = VhostUserVringState::new(5, 8); 1339 1340 let a = state.index; 1341 assert_eq!(a, 5); 1342 let a = state.num; 1343 assert_eq!(a, 8); 1344 assert!(state.is_valid()); 1345 1346 let state = VhostUserVringState::default(); 1347 let a = state.index; 1348 assert_eq!(a, 0); 1349 let a = state.num; 1350 assert_eq!(a, 0); 1351 assert!(state.is_valid()); 1352 } 1353 1354 #[test] test_vhost_user_addr()1355 fn test_vhost_user_addr() { 1356 let mut addr = VhostUserVringAddr::new( 1357 2, 1358 VhostUserVringAddrFlags::VHOST_VRING_F_LOG, 1359 0x1000, 1360 0x2000, 1361 0x3000, 1362 0x4000, 1363 ); 1364 1365 let a = addr.index; 1366 assert_eq!(a, 2); 1367 let a = addr.flags; 1368 assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits()); 1369 let a = addr.descriptor; 1370 assert_eq!(a, 0x1000); 1371 let a = addr.used; 1372 assert_eq!(a, 0x2000); 1373 let a = addr.available; 1374 
assert_eq!(a, 0x3000); 1375 let a = addr.log; 1376 assert_eq!(a, 0x4000); 1377 assert!(addr.is_valid()); 1378 1379 addr.descriptor = 0x1001; 1380 assert!(!addr.is_valid()); 1381 addr.descriptor = 0x1000; 1382 1383 addr.available = 0x3001; 1384 assert!(!addr.is_valid()); 1385 addr.available = 0x3000; 1386 1387 addr.used = 0x2001; 1388 assert!(!addr.is_valid()); 1389 addr.used = 0x2000; 1390 assert!(addr.is_valid()); 1391 } 1392 1393 #[test] test_vhost_user_state_from_config()1394 fn test_vhost_user_state_from_config() { 1395 let config = VringConfigData { 1396 queue_max_size: 256, 1397 queue_size: 128, 1398 flags: VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits, 1399 desc_table_addr: 0x1000, 1400 used_ring_addr: 0x2000, 1401 avail_ring_addr: 0x3000, 1402 log_addr: Some(0x4000), 1403 }; 1404 let addr = VhostUserVringAddr::from_config_data(2, &config); 1405 1406 let a = addr.index; 1407 assert_eq!(a, 2); 1408 let a = addr.flags; 1409 assert_eq!(a, VhostUserVringAddrFlags::VHOST_VRING_F_LOG.bits()); 1410 let a = addr.descriptor; 1411 assert_eq!(a, 0x1000); 1412 let a = addr.used; 1413 assert_eq!(a, 0x2000); 1414 let a = addr.available; 1415 assert_eq!(a, 0x3000); 1416 let a = addr.log; 1417 assert_eq!(a, 0x4000); 1418 assert!(addr.is_valid()); 1419 } 1420 1421 #[test] check_user_vring_addr()1422 fn check_user_vring_addr() { 1423 let mut msg = 1424 VhostUserVringAddr::new(0, VhostUserVringAddrFlags::all(), 0x0, 0x0, 0x0, 0x0); 1425 assert!(msg.is_valid()); 1426 1427 msg.descriptor = 1; 1428 assert!(!msg.is_valid()); 1429 msg.descriptor = 0; 1430 1431 msg.available = 1; 1432 assert!(!msg.is_valid()); 1433 msg.available = 0; 1434 1435 msg.used = 1; 1436 assert!(!msg.is_valid()); 1437 msg.used = 0; 1438 1439 msg.flags |= 0x80000000; 1440 assert!(!msg.is_valid()); 1441 msg.flags &= !0x80000000; 1442 } 1443 1444 #[test] check_user_config_msg()1445 fn check_user_config_msg() { 1446 let mut msg = 1447 VhostUserConfig::new(0, VHOST_USER_CONFIG_SIZE, 
VhostUserConfigFlags::WRITABLE); 1448 1449 assert!(msg.is_valid()); 1450 msg.size = 0; 1451 assert!(!msg.is_valid()); 1452 msg.size = 1; 1453 assert!(msg.is_valid()); 1454 msg.offset = u32::MAX; 1455 assert!(!msg.is_valid()); 1456 msg.offset = VHOST_USER_CONFIG_SIZE; 1457 assert!(!msg.is_valid()); 1458 msg.offset = VHOST_USER_CONFIG_SIZE - 1; 1459 assert!(msg.is_valid()); 1460 msg.size = 2; 1461 assert!(!msg.is_valid()); 1462 msg.size = 1; 1463 msg.flags |= VhostUserConfigFlags::LIVE_MIGRATION.bits(); 1464 assert!(msg.is_valid()); 1465 msg.flags |= 0x4; 1466 assert!(!msg.is_valid()); 1467 } 1468 1469 #[test] test_vhost_user_fs_slave()1470 fn test_vhost_user_fs_slave() { 1471 let mut fs_slave = VhostUserFSSlaveMsg::default(); 1472 1473 assert!(fs_slave.is_valid()); 1474 1475 fs_slave.fd_offset[0] = 0xffff_ffff_ffff_ffff; 1476 fs_slave.len[0] = 0x1; 1477 assert!(!fs_slave.is_valid()); 1478 1479 assert_ne!( 1480 VhostUserFSSlaveMsgFlags::MAP_R, 1481 VhostUserFSSlaveMsgFlags::MAP_W 1482 ); 1483 assert_eq!(VhostUserFSSlaveMsgFlags::EMPTY.bits(), 0); 1484 } 1485 } 1486