// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#![deny(missing_docs)]

use std::num::Wrapping;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;

use anyhow::bail;
use anyhow::Result;
use base::error;
use base::warn;
use base::Event;
use serde::Deserialize;
use serde::Serialize;
use snapshot::AnySnapshot;
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::virtio::descriptor_chain::DescriptorChain;
use crate::virtio::descriptor_chain::VIRTQ_DESC_F_AVAIL;
use crate::virtio::descriptor_chain::VIRTQ_DESC_F_USED;
use crate::virtio::descriptor_chain::VIRTQ_DESC_F_WRITE;
use crate::virtio::queue::packed_descriptor_chain::PackedDesc;
use crate::virtio::queue::packed_descriptor_chain::PackedDescEvent;
use crate::virtio::queue::packed_descriptor_chain::PackedDescriptorChain;
use crate::virtio::queue::packed_descriptor_chain::PackedNotificationType;
use crate::virtio::queue::packed_descriptor_chain::RING_EVENT_FLAGS_DESC;
use crate::virtio::Interrupt;
use crate::virtio::QueueConfig;

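// A position in a packed virtqueue ring: a 15-bit descriptor ring index plus
// the one-bit ring wrap counter, which flips each time the index wraps past
// the end of the ring.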
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
struct PackedQueueIndex {
    wrap_counter: bool,
    index: Wrapping<u16>,
}

impl PackedQueueIndex {
    pub fn new(wrap_counter: bool, index: u16) -> Self {
        Self {
            wrap_counter,
            index: Wrapping(index),
        }
    }

    pub fn new_from_desc(desc: u16) -> Self {
        let wrap_counter: bool = (desc >> 15) == 1;
        let mask: u16 = 0x7fff;
        let index = desc & mask;
        Self::new(wrap_counter, index)
    }

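    // Encodes this position as the `desc` field of an event suppression
    // structure: the wrap counter goes in bit 15 and the ring index in bits
    // 0-14, with RING_EVENT_FLAGS_DESC requesting a notification for that
    // specific descriptor position.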
    pub fn to_desc(self) -> PackedDescEvent {
        let flag = RING_EVENT_FLAGS_DESC;
        let mut desc = self.index.0;
        if self.wrap_counter {
            desc |= 1 << 15;
        }
        PackedDescEvent {
            desc: desc.into(),
            flag: flag.into(),
        }
    }

    fn add_index(&mut self, index_value: u16, size: u16) {
        let new_index = self.index.0 + index_value;
        if new_index < size {
            self.index = Wrapping(new_index);
        } else {
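            // Passed the end of the ring: wrap the index around and flip the
            // wrap counter.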
            self.index = Wrapping(new_index - size);
            self.wrap_counter = !self.wrap_counter;
        }
    }
}

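// Both ring wrap counters start out as true (1), with the ring index at 0.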
impl Default for PackedQueueIndex {
    fn default() -> Self {
        Self::new(true, 0)
    }
}

/// A packed virtqueue, the virtio 1.1 alternative to the split virtqueue layout.
#[derive(Debug)]
pub struct PackedQueue {
    mem: GuestMemory,

    event: Event,
    interrupt: Interrupt,

    // The queue size in elements that the driver selected.
    size: u16,

    // MSI-X vector for the queue. Don't care for INTx.
    vector: u16,

    // Internal index counters to keep track of where to poll.
    avail_index: PackedQueueIndex,
    use_index: PackedQueueIndex,
    signalled_used_index: PackedQueueIndex,

    // Device feature bits accepted by the driver.
    features: u64,

    // Guest physical address of the descriptor table.
    desc_table: GuestAddress,

    // Write-only by the device; includes information for reducing the number
    // of driver events (available buffer notifications).
    device_event_suppression: GuestAddress,

    // Read-only by the device; includes information for reducing the number
    // of device events (used buffer notifications).
    driver_event_suppression: GuestAddress,
}

/// Serialized state of a `PackedQueue`.
#[derive(Serialize, Deserialize)]
pub struct PackedQueueSnapshot {
    size: u16,
    vector: u16,
    avail_index: PackedQueueIndex,
    use_index: PackedQueueIndex,
    signalled_used_index: PackedQueueIndex,
    features: u64,
    desc_table: GuestAddress,
    device_event_suppression: GuestAddress,
    driver_event_suppression: GuestAddress,
}

impl PackedQueue {
    /// Constructs an empty virtio queue from the given `config`.
    pub fn new(
        config: &QueueConfig,
        mem: &GuestMemory,
        event: Event,
        interrupt: Interrupt,
    ) -> Result<Self> {
        let size = config.size();

        let desc_table = config.desc_table();
        let driver_area = config.avail_ring();
        let device_area = config.used_ring();

        // Validate addresses and queue size to ensure that address calculation won't overflow.
        let ring_sizes = Self::area_sizes(size, desc_table, driver_area, device_area);
        let rings = ring_sizes.iter().zip(vec![
            "descriptor table",
            "driver event suppression",
            "device event suppression",
        ]);

        for ((addr, size), name) in rings {
            if addr.checked_add(*size as u64).is_none() {
                bail!(
                    "virtio queue {} goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    name,
                    addr.offset(),
                    size,
                );
            }
        }

        Ok(PackedQueue {
            mem: mem.clone(),
            event,
            interrupt,
            size,
            vector: config.vector(),
            desc_table: config.desc_table(),
            driver_event_suppression: config.avail_ring(),
            device_event_suppression: config.used_ring(),
            features: config.acked_features(),
            avail_index: PackedQueueIndex::default(),
            use_index: PackedQueueIndex::default(),
            signalled_used_index: PackedQueueIndex::default(),
        })
    }

    /// Reclaim the queue's internal state after a vhost-user backend returns
    /// control of the queue.
    pub fn vhost_user_reclaim(&mut self, _vring_base: u16) {
        // TODO: b/331466964 - Need more than `vring_base` to reclaim a packed virtqueue.
        unimplemented!()
    }

    /// Returns the ring index of the next available descriptor to process.
    pub fn next_avail_to_process(&self) -> u16 {
        self.avail_index.index.0
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Getter for vector field.
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Getter for descriptor area.
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Getter for driver area.
    pub fn avail_ring(&self) -> GuestAddress {
        self.driver_event_suppression
    }

    /// Getter for device area.
    pub fn used_ring(&self) -> GuestAddress {
        self.device_event_suppression
    }

    /// Get a reference to the queue's "kick event".
    pub fn event(&self) -> &Event {
        &self.event
    }

    /// Get a reference to the queue's interrupt.
    pub fn interrupt(&self) -> &Interrupt {
        &self.interrupt
    }

    fn area_sizes(
        queue_size: u16,
        desc_table: GuestAddress,
        driver_area: GuestAddress,
        device_area: GuestAddress,
    ) -> Vec<(GuestAddress, usize)> {
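        // Each packed descriptor is 16 bytes; each event suppression area is a
        // fixed 4 bytes (a 16-bit `desc` field plus a 16-bit `flags` field).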
        vec![
            (desc_table, 16 * queue_size as usize),
            (driver_area, 4),
            (device_area, 4),
        ]
    }

    /// Set the device event suppression.
    ///
    /// This field is used to specify the timing of when the driver notifies the
    /// device that the descriptor table is ready to be processed.
    fn set_avail_event(&mut self, event: PackedDescEvent) {
        fence(Ordering::SeqCst);
        self.mem
            .write_obj_at_addr_volatile(event, self.device_event_suppression)
            .unwrap();
    }

    // Get the driver event suppression.
    // This field is used to specify the timing of when the device notifies the
    // driver that the descriptor table is ready to be processed.
    fn get_driver_event(&self) -> PackedDescEvent {
        fence(Ordering::SeqCst);

        let desc: PackedDescEvent = self
            .mem
            .read_obj_from_addr_volatile(self.driver_event_suppression)
            .unwrap();
        desc
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop_peeked` to remove the returned descriptor chain from the queue.
    pub fn peek(&mut self) -> Option<DescriptorChain> {
        let desc_addr = self
            .desc_table
            .checked_add((self.avail_index.index.0 as u64) * 16)
            .expect("peeked address will not overflow");

        let desc = self
            .mem
            .read_obj_from_addr::<PackedDesc>(desc_addr)
            .inspect_err(|_e| {
                error!("failed to read desc {:#x}", desc_addr.offset());
            })
            .ok()?;

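        // A descriptor is available when its VIRTQ_DESC_F_AVAIL flag matches
        // our current wrap counter and its VIRTQ_DESC_F_USED flag does not.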
        if !desc.is_available(self.avail_index.wrap_counter as u16) {
            return None;
        }

        // This fence ensures that subsequent reads from the descriptor do not
        // get reordered and happen only after verifying the descriptor table is
        // available.
        fence(Ordering::SeqCst);

        let chain = PackedDescriptorChain::new(
            &self.mem,
            self.desc_table,
            self.size,
            self.avail_index.wrap_counter,
            self.avail_index.index.0,
        );

        match DescriptorChain::new(chain, &self.mem, self.avail_index.index.0) {
            Ok(descriptor_chain) => Some(descriptor_chain),
            Err(e) => {
                error!("{:#}", e);
                None
            }
        }
    }

    /// Remove the first available descriptor chain from the queue.
    /// This function should only be called immediately following `peek` and must be passed a
    /// reference to the same `DescriptorChain` returned by the most recent `peek`.
    pub(super) fn pop_peeked(&mut self, descriptor_chain: &DescriptorChain) {
        self.avail_index
            .add_index(descriptor_chain.count, self.size());
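        // If VIRTIO_RING_F_EVENT_IDX was negotiated, publish the new avail
        // index to the device event suppression area so the driver knows when
        // the device next needs an available-buffer notification.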
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.set_avail_event(self.avail_index.to_desc());
        }
    }

    /// Write to the first descriptor in a descriptor chain to mark the chain as used.
    pub fn add_used(&mut self, desc_chain: DescriptorChain, len: u32) {
        let desc_index = desc_chain.index();
        if desc_index >= self.size {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return;
        }

        let chain_id = desc_chain
            .id
            .expect("Packed descriptor chain should have id");

        let desc_addr = self
            .desc_table
            .checked_add(self.use_index.index.0 as u64 * 16)
            .expect("Descriptor address should not overflow.");

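        // Packed descriptor layout: 64-bit addr at offset 0, 32-bit len at
        // offset 8, 16-bit id at offset 12, 16-bit flags at offset 14.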
        // Write to len field.
        self.mem
            .write_obj_at_addr(
                len,
                desc_addr
                    .checked_add(8)
                    .expect("Descriptor address should not overflow."),
            )
            .unwrap();

        // Write to id field.
        self.mem
            .write_obj_at_addr(
                chain_id,
                desc_addr
                    .checked_add(12)
                    .expect("Descriptor address should not overflow."),
            )
            .unwrap();

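        // Mark the descriptor used: in the packed ring format, the device sets
        // both the AVAIL and USED flag bits equal to its own wrap counter.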
        let wrap_counter = self.use_index.wrap_counter;

        let mut flags: u16 = 0;
        if wrap_counter {
            flags = flags | VIRTQ_DESC_F_USED | VIRTQ_DESC_F_AVAIL;
        }
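        // In a used descriptor, VIRTQ_DESC_F_WRITE specifies whether any data
        // bytes were written into the buffer.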
        if len > 0 {
            flags |= VIRTQ_DESC_F_WRITE;
        }

        // Writing to flags should come at the very end to avoid exposing
        // partially written descriptor data to the driver.
        fence(Ordering::SeqCst);

        self.mem
            .write_obj_at_addr_volatile(flags, desc_addr.unchecked_add(14))
            .unwrap();

        self.use_index.add_index(desc_chain.count, self.size());
    }

    /// Returns whether the queue should have an interrupt sent based on its state.
    fn queue_wants_interrupt(&mut self) -> bool {
        let driver_event = self.get_driver_event();
        match driver_event.notification_type() {
            PackedNotificationType::Enable => true,
            PackedNotificationType::Disable => false,
            PackedNotificationType::Desc(desc) => {
                if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) == 0 {
                    warn!("driver requested a descriptor-based notification without negotiating VIRTIO_RING_F_EVENT_IDX; this is undefined behavior");
                    return true;
                }

                // Remember the current use_index for the next notification.
                let old = self.signalled_used_index;
                self.signalled_used_index = self.use_index;

                // Get desc_event_off and desc_event_wrap from the driver event suppression area.
                let event_index: PackedQueueIndex = PackedQueueIndex::new_from_desc(desc);

                let event_idx = event_index.index;
                let old_idx = old.index;
                let new_idx = self.use_index.index;

                // In qemu's implementation, there's an additional calculation,
                // need to verify its correctness.
                // if event_index.wrap_counter != self.use_index.wrap_counter {
                //     event_idx -= self.size() as u16;
                // }

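                // Same idea as vring_need_event() for split rings: interrupt
                // only if the driver's event index falls within the window of
                // entries used since the last signal, computed with wrapping
                // arithmetic.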
                (new_idx - event_idx - Wrapping(1)) < (new_idx - old_idx)
            }
        }
    }

    /// Inject an interrupt into the guest on this queue if the queue's state
    /// calls for one.
    ///
    /// Returns true if an interrupt was injected and false otherwise.
    pub fn trigger_interrupt(&mut self) -> bool {
        if self.queue_wants_interrupt() {
            self.interrupt.signal_used_queue(self.vector);
            true
        } else {
            false
        }
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    pub fn ack_features(&mut self, features: u64) {
        self.features |= features;
    }

    /// TODO: b/290307056 - Implement snapshot for packed virtqueue,
    /// add tests to validate.
    pub fn snapshot(&self) -> Result<AnySnapshot> {
        bail!("Snapshot for packed virtqueue not implemented.");
    }

    /// TODO: b/290307056 - Implement restore for packed virtqueue,
    /// add tests to validate.
    pub fn restore(
        _queue_value: AnySnapshot,
        _mem: &GuestMemory,
        _event: Event,
        _interrupt: Interrupt,
    ) -> Result<PackedQueue> {
        bail!("Restore for packed virtqueue not implemented.");
    }
}