// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! virtqueue interface

#![deny(missing_docs)]

use std::ops::Deref;
use std::ops::DerefMut;

pub mod packed_descriptor_chain;
mod packed_queue;
pub mod split_descriptor_chain;
mod split_queue;

use std::num::Wrapping;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::warn;
use base::Event;
use cros_async::AsyncError;
use cros_async::EventAsync;
use futures::channel::oneshot;
use futures::select_biased;
use futures::FutureExt;
use packed_queue::PackedQueue;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use split_queue::SplitQueue;
use virtio_sys::virtio_config::VIRTIO_F_RING_PACKED;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::virtio::DescriptorChain;
use crate::virtio::Interrupt;
use crate::virtio::VIRTIO_MSI_NO_VECTOR;

/// A virtio queue's parameters.
///
/// `QueueConfig` can be converted into a running `Queue` by calling [`QueueConfig::activate()`].
pub struct QueueConfig {
    /// Whether this queue has already been activated.
    activated: bool,

    /// The maximal size in elements offered by the device.
    max_size: u16,

    /// The queue size in elements the driver selected. This is always guaranteed to be a power of
    /// two less than or equal to `max_size`, as required for split virtqueues. These invariants
    /// are enforced by `set_size()`.
    size: u16,

    /// Indicates if the queue is finished with configuration.
    ready: bool,

    /// MSI-X vector for the queue. Don't care for INTx.
    vector: u16,

    /// Ring features (e.g. `VIRTIO_RING_F_EVENT_IDX`, `VIRTIO_F_RING_PACKED`) offered by the
    /// device.
    features: u64,

    /// Device feature bits accepted by the driver.
    acked_features: u64,

    /// Guest physical address of the descriptor table.
    desc_table: GuestAddress,

    /// Guest physical address of the available ring (driver area).
    ///
    /// TODO(b/290657008): update field and accessor names to match the current virtio spec.
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring (device area).
    used_ring: GuestAddress,

    /// Initial available ring index when the queue is activated.
    next_avail: Wrapping<u16>,

    /// Initial used ring index when the queue is activated.
    next_used: Wrapping<u16>,
}

#[derive(Serialize, Deserialize)]
struct QueueConfigSnapshot {
    activated: bool,
    max_size: u16,
    size: u16,
    ready: bool,
    vector: u16,
    features: u64,
    acked_features: u64,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
}
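
// Typical lifecycle sketch (illustrative only; the `GuestMemory`, `Event`, and
// `Interrupt` values come from the surrounding device/transport code, and
// `features` stands for the device's offered ring feature bits):
//
//     let mut cfg = QueueConfig::new(256, features);
//     cfg.set_size(128);     // driver selects a power-of-two queue size
//     cfg.set_ready(true);   // driver signals configuration is complete
//     let mut queue = cfg.activate(&mem, event, interrupt)?;
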
impl QueueConfig {
    /// Constructs a virtio queue configuration with the given `max_size`.
    pub fn new(max_size: u16, features: u64) -> Self {
        assert!(max_size > 0);
        assert!(max_size <= Queue::MAX_SIZE);
        QueueConfig {
            activated: false,
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            features,
            acked_features: 0,
            next_used: Wrapping(0),
            next_avail: Wrapping(0),
        }
    }

    /// Returns the maximum size of this queue.
    pub fn max_size(&self) -> u16 {
        self.max_size
    }

    /// Returns the currently configured size of the queue.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Sets the queue size.
    pub fn set_size(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to size on ready queue");
            return;
        }

        // `size` must be a power of two no larger than `max_size`; see the
        // invariant documented on the `size` field.
        if val > self.max_size || !val.is_power_of_two() {
            warn!(
                "requested queue size {} is invalid (max_size {})",
                val, self.max_size
            );
            return;
        }

        self.size = val;
    }

    /// Returns the currently configured interrupt vector.
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Sets the interrupt vector for this queue.
    pub fn set_vector(&mut self, val: u16) {
        if self.ready {
            warn!("ignoring write to vector on ready queue");
            return;
        }

        self.vector = val;
    }

    /// Getter for descriptor area.
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Setter for descriptor area.
    pub fn set_desc_table(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to desc_table on ready queue");
            return;
        }

        self.desc_table = val;
    }

    /// Getter for driver area.
    pub fn avail_ring(&self) -> GuestAddress {
        self.avail_ring
    }

    /// Setter for driver area.
    pub fn set_avail_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to avail_ring on ready queue");
            return;
        }

        self.avail_ring = val;
    }

    /// Getter for device area.
    pub fn used_ring(&self) -> GuestAddress {
        self.used_ring
    }

    /// Setter for device area.
    pub fn set_used_ring(&mut self, val: GuestAddress) {
        if self.ready {
            warn!("ignoring write to used_ring on ready queue");
            return;
        }

        self.used_ring = val;
    }

    /// Getter for next_avail index.
    pub fn next_avail(&self) -> Wrapping<u16> {
        self.next_avail
    }

    /// Setter for next_avail index.
    pub fn set_next_avail(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_avail on ready queue");
            return;
        }

        self.next_avail = val;
    }

    /// Getter for next_used index.
    pub fn next_used(&self) -> Wrapping<u16> {
        self.next_used
    }

    /// Setter for next_used index.
    pub fn set_next_used(&mut self, val: Wrapping<u16>) {
        if self.ready {
            warn!("ignoring write to next_used on ready queue");
            return;
        }

        self.next_used = val;
    }
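
    // The setters above are normally driven by the virtio transport as the
    // guest driver programs the queue registers. A hypothetical dispatch might
    // look like this (illustrative only; the real crosvm transports live in
    // the PCI/MMIO device code and use their own register definitions):
    //
    //     match register {
    //         QUEUE_SIZE => cfg.set_size(value as u16),
    //         QUEUE_MSIX_VECTOR => cfg.set_vector(value as u16),
    //         QUEUE_READY => cfg.set_ready(value != 0),
    //         // desc_table/avail_ring/used_ring addresses are written similarly
    //         _ => {}
    //     }
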
    /// Returns the features that have been acknowledged by the driver.
    pub fn acked_features(&self) -> u64 {
        self.acked_features
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    pub fn ack_features(&mut self, features: u64) {
        self.acked_features |= features & self.features;
    }

    /// Return whether the driver has enabled this queue.
    pub fn ready(&self) -> bool {
        self.ready
    }

    /// Signal that the driver has completed queue configuration.
    pub fn set_ready(&mut self, enable: bool) {
        self.ready = enable;
    }

    /// Convert the queue configuration into an active queue.
    pub fn activate(
        &mut self,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> Result<Queue> {
        if !self.ready {
            bail!("attempted to activate a non-ready queue");
        }

        if self.activated {
            bail!("queue is already activated");
        }

        // If the VIRTIO_F_RING_PACKED feature bit is set, create a packed queue;
        // otherwise, create a split queue.
        let queue: Queue = if ((self.acked_features >> VIRTIO_F_RING_PACKED) & 1) != 0 {
            let pq = PackedQueue::new(self, mem, event, interrupt)
                .context("Failed to create a packed queue.")?;
            Queue::PackedVirtQueue(pq)
        } else {
            let sq = SplitQueue::new(self, mem, event, interrupt)
                .context("Failed to create a split queue.")?;
            Queue::SplitVirtQueue(sq)
        };

        self.activated = true;
        Ok(queue)
    }

    /// Reset queue to a clean state.
    pub fn reset(&mut self) {
        self.activated = false;
        self.ready = false;
        self.size = self.max_size;
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.acked_features = 0;
    }

    /// Take snapshot of queue configuration.
    pub fn snapshot(&self) -> Result<AnySnapshot> {
        AnySnapshot::to_any(QueueConfigSnapshot {
            activated: self.activated,
            max_size: self.max_size,
            size: self.size,
            ready: self.ready,
            vector: self.vector,
            features: self.features,
            acked_features: self.acked_features,
            desc_table: self.desc_table,
            avail_ring: self.avail_ring,
            used_ring: self.used_ring,
            next_avail: self.next_avail,
            next_used: self.next_used,
        })
        .context("error serializing")
    }

    /// Restore queue configuration from snapshot.
    pub fn restore(&mut self, data: AnySnapshot) -> Result<()> {
        let snap: QueueConfigSnapshot =
            AnySnapshot::from_any(data).context("error deserializing")?;
        self.activated = snap.activated;
        self.max_size = snap.max_size;
        self.size = snap.size;
        self.ready = snap.ready;
        self.vector = snap.vector;
        self.features = snap.features;
        self.acked_features = snap.acked_features;
        self.desc_table = snap.desc_table;
        self.avail_ring = snap.avail_ring;
        self.used_ring = snap.used_ring;
        self.next_avail = snap.next_avail;
        self.next_used = snap.next_used;
        Ok(())
    }
}
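
// Snapshot/restore round-trip sketch (illustrative; error handling elided).
// `snapshot()` captures every field, including `activated`, so a restored
// config resumes exactly where the original left off:
//
//     let snap = cfg.snapshot()?;
//     // ... later, e.g. in a freshly constructed QueueConfig ...
//     cfg.restore(snap)?;
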
/// Usage:
/// define_queue_method!(method_name, return_type[, mut][, arg1: arg1_type, arg2: arg2_type,
/// ...])
///
/// - `method_name`: The name of the method to be defined (as an identifier).
/// - `return_type`: The return type of the method.
/// - `mut` (optional): Include this keyword if the method requires a mutable reference to `self`
///   (`&mut self`).
/// - `arg1: arg1_type, arg2: arg2_type, ...` (optional): Include method parameters as a
///   comma-separated list of `name: type` pairs, if the method takes any arguments.
macro_rules! define_queue_method {
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
    (
        $(#[$doc:meta])*
        $method:ident, $return_type:ty, mut, $( $var:ident : $vartype:ty ),*
    ) => {
        $(#[$doc])*
        pub fn $method(&mut self, $($var: $vartype),*) -> $return_type {
            match self {
                Queue::SplitVirtQueue(sq) => sq.$method($($var),*),
                Queue::PackedVirtQueue(pq) => pq.$method($($var),*),
            }
        }
    };
}

/// Virtqueue interface representing different types of virtqueues.
/// The struct of each queue type is wrapped in the enum variants.
#[derive(Debug)]
pub enum Queue {
    /// Split virtqueue type in the virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-350007>
    SplitVirtQueue(SplitQueue),
    /// Packed virtqueue type in the virtio v1.2 spec: <https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-720008>
    PackedVirtQueue(PackedQueue),
}

impl Queue {
    /// Largest valid number of entries in a virtqueue.
    pub const MAX_SIZE: u16 = 32768;

    /// Asynchronously read the next descriptor chain from the queue.
    /// Returns a `DescriptorChain` when it is `await`ed.
    pub async fn next_async(
        &mut self,
        eventfd: &mut EventAsync,
    ) -> std::result::Result<DescriptorChain, AsyncError> {
        loop {
            // Check if there are more descriptors available.
            if let Some(chain) = self.pop() {
                return Ok(chain);
            }
            eventfd.next_val().await?;
        }
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop()` on the returned [`PeekedDescriptorChain`] to remove it from the queue.
    pub fn peek(&mut self) -> Option<PeekedDescriptorChain> {
        let desc_chain = match self {
            Queue::SplitVirtQueue(q) => q.peek(),
            Queue::PackedVirtQueue(q) => q.peek(),
        }?;

        Some(PeekedDescriptorChain::new(self, desc_chain))
    }

    /// If a new `DescriptorChain` is available, returns one and removes it from the queue.
    pub fn pop(&mut self) -> Option<DescriptorChain> {
        self.peek().map(PeekedDescriptorChain::pop)
    }
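
    // Peek-then-pop sketch (illustrative): a device can inspect the next chain
    // and only consume it once it can actually make progress (`can_handle` is
    // a hypothetical device-specific predicate):
    //
    //     if let Some(peeked) = queue.peek() {
    //         if can_handle(&peeked) {
    //             let chain = peeked.pop();
    //             // ... process `chain` ...
    //         }
    //         // otherwise, dropping `peeked` leaves the chain in the queue
    //     }
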
    /// Returns `None` if `stop_rx` receives a value; otherwise returns the result
    /// of waiting for the next descriptor.
    pub async fn next_async_interruptable(
        &mut self,
        queue_event: &mut EventAsync,
        mut stop_rx: &mut oneshot::Receiver<()>,
    ) -> std::result::Result<Option<DescriptorChain>, AsyncError> {
        select_biased! {
            avail_desc_res = self.next_async(queue_event).fuse() => {
                Ok(Some(avail_desc_res?))
            }
            _ = stop_rx => Ok(None),
        }
    }

    /// Inject an interrupt into the guest on this queue.
    /// Returns `true` if an interrupt was injected into the guest for this queue,
    /// `false` otherwise.
    pub fn trigger_interrupt(&mut self) -> bool {
        match self {
            Queue::SplitVirtQueue(sq) => sq.trigger_interrupt(),
            Queue::PackedVirtQueue(pq) => pq.trigger_interrupt(),
        }
    }

    /// Restore queue from snapshot.
    pub fn restore(
        queue_config: &QueueConfig,
        queue_value: AnySnapshot,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> anyhow::Result<Queue> {
        if queue_config.acked_features & (1 << VIRTIO_F_RING_PACKED) != 0 {
            PackedQueue::restore(queue_value, mem, event, interrupt).map(Queue::PackedVirtQueue)
        } else {
            SplitQueue::restore(queue_value, mem, event, interrupt).map(Queue::SplitVirtQueue)
        }
    }

    /// "Reclaim" a queue that was given to a vhost-user backend and is now being taken back using
    /// VHOST_USER_GET_VRING_BASE.
    ///
    /// The `Queue` will have stale fields if the vhost-user backend fulfilled any virtqueue
    /// requests. This function updates the `Queue` to pick up where the backend left off.
    pub fn vhost_user_reclaim(&mut self, vring_base: u16) {
        match self {
            Queue::SplitVirtQueue(q) => q.vhost_user_reclaim(vring_base),
            Queue::PackedVirtQueue(q) => q.vhost_user_reclaim(vring_base),
        }
    }

    /// Getter for the next index of the available ring that the device will process.
    ///
    /// Not to be confused with the available ring's index field, which is the next index for the
    /// driver to fill.
    pub fn next_avail_to_process(&self) -> u16 {
        match self {
            Queue::SplitVirtQueue(q) => q.next_avail_to_process(),
            Queue::PackedVirtQueue(q) => q.next_avail_to_process(),
        }
    }

    define_queue_method!(
        /// Getter for vector field
        vector,
        u16,
    );

    define_queue_method!(
        /// Getter for descriptor area
        desc_table,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for driver area
        avail_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Getter for device area
        used_ring,
        GuestAddress,
    );

    define_queue_method!(
        /// Return the actual size of the queue, as the driver may not set up a
        /// queue as big as the device allows.
        size,
        u16,
    );

    define_queue_method!(
        /// Get a reference to the queue's event.
        event,
        &Event,
    );

    define_queue_method!(
        /// Get a reference to the queue's interrupt.
        interrupt,
        &Interrupt,
    );
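
    // Typical device processing-loop sketch (illustrative; `handle_request` is
    // a hypothetical helper returning the number of bytes written to the
    // chain's writable buffers):
    //
    //     while let Some(mut chain) = queue.pop() {
    //         let written = handle_request(&mut chain)?;
    //         queue.add_used(chain, written);
    //         queue.trigger_interrupt();
    //     }
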
    define_queue_method!(
        /// Puts an available descriptor head into the used ring for use by the guest.
        add_used,
        (),
        mut,
        desc_chain: DescriptorChain,
        len: u32
    );

    define_queue_method!(
        /// Take snapshot of queue's current status
        snapshot,
        Result<AnySnapshot>,
    );
}

/// A `DescriptorChain` that has been peeked from a `Queue` but not popped yet.
///
/// Call [`pop()`](Self::pop) to pop this descriptor chain from the `Queue` and receive the
/// contained `DescriptorChain` object.
///
/// This object holds a mutable reference to the `Queue` to ensure it is not possible to pop or
/// peek another descriptor while a peek is already active. Either `pop()` or drop this object
/// before attempting to manipulate the `Queue` again.
pub struct PeekedDescriptorChain<'q> {
    queue: &'q mut Queue,
    desc_chain: DescriptorChain,
}

impl<'q> PeekedDescriptorChain<'q> {
    /// Create a `PeekedDescriptorChain` that holds a mutable reference to its `Queue`.
    /// Use [`Queue::peek()`] rather than calling this function.
    fn new(queue: &'q mut Queue, desc_chain: DescriptorChain) -> Self {
        PeekedDescriptorChain { queue, desc_chain }
    }

    /// Pop this descriptor chain from the queue.
    pub fn pop(self) -> DescriptorChain {
        match self.queue {
            Queue::SplitVirtQueue(q) => q.pop_peeked(&self.desc_chain),
            Queue::PackedVirtQueue(q) => q.pop_peeked(&self.desc_chain),
        }
        self.desc_chain
    }
}

impl Deref for PeekedDescriptorChain<'_> {
    type Target = DescriptorChain;

    fn deref(&self) -> &Self::Target {
        &self.desc_chain
    }
}

impl DerefMut for PeekedDescriptorChain<'_> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.desc_chain
    }
}
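
// `PeekedDescriptorChain` derefs to `DescriptorChain`, so the chain's contents
// can be read through `Deref`/`DerefMut` before deciding whether to pop it.
// Dropping the peek without calling `pop()` leaves the chain at the head of
// the queue, to be returned again by the next `peek()`/`pop()`. Illustrative
// sketch:
//
//     if let Some(mut peeked) = queue.peek() {
//         let chain: &mut DescriptorChain = &mut peeked; // DerefMut
//         // read from `chain`, then either `peeked.pop()` or drop `peeked`
//     }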