// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::num::Wrapping;
use std::sync::atomic::fence;
use std::sync::atomic::Ordering;

use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::Event;
use data_model::Le32;
use serde::Deserialize;
use serde::Serialize;
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::FromZeroes;

use crate::virtio::DescriptorChain;
use crate::virtio::Interrupt;
use crate::virtio::QueueConfig;
use crate::virtio::SplitDescriptorChain;

#[allow(dead_code)]
const VIRTQ_USED_F_NO_NOTIFY: u16 = 0x1;
#[allow(dead_code)]
const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 0x1;

/// An activated virtio queue with split queue layout.
#[derive(Debug)]
pub struct SplitQueue {
    mem: GuestMemory,

    event: Event,

    /// The queue size in elements the driver selected. This is always guaranteed to be a power of
    /// two, as required for split virtqueues.
    size: u16,

    /// MSI-X vector for the queue. The value is a don't-care when INTx is used.
    vector: u16,

    /// Guest physical address of the descriptor table
    desc_table: GuestAddress,

    /// Guest physical address of the available ring
    avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    used_ring: GuestAddress,

    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,

    // Device feature bits accepted by the driver
    features: u64,
    last_used: Wrapping<u16>,
}

#[derive(Serialize, Deserialize)]
pub struct SplitQueueSnapshot {
    size: u16,
    vector: u16,
    desc_table: GuestAddress,
    avail_ring: GuestAddress,
    used_ring: GuestAddress,
    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,
    features: u64,
    last_used: Wrapping<u16>,
}

#[repr(C)]
#[derive(AsBytes, FromZeroes, FromBytes)]
struct virtq_used_elem {
    id: Le32,
    len: Le32,
}

impl SplitQueue {
    /// Constructs an activated split virtio queue with the given configuration.
    pub fn new(config: &QueueConfig, mem: &GuestMemory, event: Event) -> Result<SplitQueue> {
        let size = config.size();
        if !size.is_power_of_two() {
            bail!("split queue size {size} is not a power of 2");
        }

        let desc_table = config.desc_table();
        let avail_ring = config.avail_ring();
        let used_ring = config.used_ring();

        // Validate addresses and queue size to ensure that address calculation won't overflow.
        let ring_sizes = Self::ring_sizes(size, desc_table, avail_ring, used_ring);
        let rings = ring_sizes
            .iter()
            .zip(vec!["descriptor table", "available ring", "used ring"]);

        for ((addr, size), name) in rings {
            if addr.checked_add(*size as u64).is_none() {
                bail!(
                    "virtio queue {} goes out of bounds: start:0x{:08x} size:0x{:08x}",
                    name,
                    addr.offset(),
                    size,
                );
            }
        }

        Ok(SplitQueue {
            mem: mem.clone(),
            event,
            size,
            vector: config.vector(),
            desc_table: config.desc_table(),
            avail_ring: config.avail_ring(),
            used_ring: config.used_ring(),
            features: config.acked_features(),
            next_avail: config.next_avail(),
            next_used: config.next_used(),
            last_used: config.next_used(),
        })
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn size(&self) -> u16 {
        self.size
    }

    /// Getter for vector field
    pub fn vector(&self) -> u16 {
        self.vector
    }

    /// Getter for descriptor area
    pub fn desc_table(&self) -> GuestAddress {
        self.desc_table
    }

    /// Getter for driver area
    pub fn avail_ring(&self) -> GuestAddress {
        self.avail_ring
    }

    /// Getter for device area
    pub fn used_ring(&self) -> GuestAddress {
        self.used_ring
    }

    /// Get a reference to the queue's "kick event"
    pub fn event(&self) -> &Event {
        &self.event
    }

    // Return `index` modulo the currently configured queue size.
    fn wrap_queue_index(&self, index: Wrapping<u16>) -> u16 {
        // We know that `self.size` is a power of two (enforced by `new()`), so the modulus can
        // be calculated with a bitmask rather than actual division.
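        // For example, with `size == 8` the mask is 0b0111, so a running index of 9 wraps to
        // slot 1.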
        debug_assert!(self.size.is_power_of_two());
        index.0 & self.size.wrapping_sub(1)
    }

    fn ring_sizes(
        queue_size: u16,
        desc_table: GuestAddress,
        avail_ring: GuestAddress,
        used_ring: GuestAddress,
    ) -> Vec<(GuestAddress, usize)> {
        let queue_size = queue_size as usize;
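        // The sizes follow the split virtqueue layout in the virtio spec:
        //   descriptor table: 16 bytes per descriptor
        //   available ring: flags (2) + idx (2) + ring (2 * queue_size) + used_event (2)
        //   used ring: flags (2) + idx (2) + ring (8 * queue_size) + avail_event (2)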
        vec![
            (desc_table, 16 * queue_size),
            (avail_ring, 6 + 2 * queue_size),
            (used_ring, 6 + 8 * queue_size),
        ]
    }

    // Get the index of the first available descriptor chain in the available ring
    // (the next one that the driver will fill).
    //
    // All available ring entries between `self.next_avail` and `get_avail_index()` are available
    // to be processed by the device.
    fn get_avail_index(&self) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

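        // The 16-bit `idx` field lives at offset 2 in the available ring, just after `flags`.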
        let avail_index_addr = self.avail_ring.unchecked_add(2);
        let avail_index: u16 = self
            .mem
            .read_obj_from_addr_volatile(avail_index_addr)
            .unwrap();

        Wrapping(avail_index)
    }

    // Set the `avail_event` field in the used ring.
    //
    // This allows the device to inform the driver that driver-to-device notification
    // (kicking the ring) is not necessary until the driver reaches the `avail_index` descriptor.
    //
    // This value is only used if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn set_avail_event(&mut self, avail_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

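        // `avail_event` is the last field of the used ring: it follows `flags` (2 bytes), `idx`
        // (2 bytes), and `size` 8-byte ring entries.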
        let avail_event_addr = self.used_ring.unchecked_add(4 + 8 * u64::from(self.size));
        self.mem
            .write_obj_at_addr_volatile(avail_index.0, avail_event_addr)
            .unwrap();
    }

    // Query the value of a single-bit flag in the available ring.
    //
    // Returns `true` if `flag` is currently set (by the driver) in the available ring flags.
    fn get_avail_flag(&self, flag: u16) -> bool {
        fence(Ordering::SeqCst);

        let avail_flags: u16 = self
            .mem
            .read_obj_from_addr_volatile(self.avail_ring)
            .unwrap();

        avail_flags & flag == flag
    }

    // Get the `used_event` field in the available ring.
    //
    // The returned value is the index of the next descriptor chain entry for which the driver
    // needs to be notified upon use.  Entries before this index may be used without notifying
    // the driver.
    //
    // This value is only valid if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn get_used_event(&self) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

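        // `used_event` is the last field of the available ring: it follows `flags` (2 bytes),
        // `idx` (2 bytes), and `size` 2-byte ring entries.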
        let used_event_addr = self.avail_ring.unchecked_add(4 + 2 * u64::from(self.size));
        let used_event: u16 = self
            .mem
            .read_obj_from_addr_volatile(used_event_addr)
            .unwrap();

        Wrapping(used_event)
    }

    // Set the `idx` field in the used ring.
    //
    // This indicates to the driver that all entries up to (but not including) `used_index` have
    // been used by the device and may be processed by the driver.
    fn set_used_index(&mut self, used_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

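        // The 16-bit `idx` field lives at offset 2 in the used ring, just after `flags`.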
        let used_index_addr = self.used_ring.unchecked_add(2);
        self.mem
            .write_obj_at_addr_volatile(used_index.0, used_index_addr)
            .unwrap();
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop_peeked` to remove the returned descriptor chain from the queue.
    pub fn peek(&mut self) -> Option<DescriptorChain> {
        let avail_index = self.get_avail_index();
        if self.next_avail == avail_index {
            return None;
        }

        // This fence ensures that subsequent reads from the descriptor do not
        // get reordered and happen only after fetching the available_index and
        // checking that there is a slot available.
        fence(Ordering::SeqCst);

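        // Ring entries begin at offset 4 in the available ring, after `flags` and `idx`.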
        let desc_idx_addr_offset = 4 + (u64::from(self.wrap_queue_index(self.next_avail)) * 2);
        let desc_idx_addr = self.avail_ring.checked_add(desc_idx_addr_offset)?;

        // This index is validated by `DescriptorChain::new()` below.
        let descriptor_index: u16 = self.mem.read_obj_from_addr_volatile(desc_idx_addr).unwrap();

        let chain =
            SplitDescriptorChain::new(&self.mem, self.desc_table, self.size, descriptor_index);
        DescriptorChain::new(chain, &self.mem, descriptor_index)
            .map_err(|e| {
                error!("{:#}", e);
                e
            })
            .ok()
    }

    /// Remove the first available descriptor chain from the queue.
    /// This function should only be called immediately following `peek` and must be passed a
    /// reference to the same `DescriptorChain` returned by the most recent `peek`.
    pub(super) fn pop_peeked(&mut self, _descriptor_chain: &DescriptorChain) {
        self.next_avail += Wrapping(1);
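        // With EVENT_IDX negotiated, update `avail_event` so the driver only needs to kick us
        // once it publishes descriptors we have not yet seen.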
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.set_avail_event(self.next_avail);
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, desc_chain: DescriptorChain, len: u32) {
        let desc_index = desc_chain.index();
        debug_assert!(desc_index < self.size);

        let used_ring = self.used_ring;
        let next_used = self.wrap_queue_index(self.next_used) as usize;
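        // Each `virtq_used_elem` is 8 bytes; the entries begin at offset 4, after `flags` and
        // `idx`.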
        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);

        let elem = virtq_used_elem {
            id: Le32::from(u32::from(desc_index)),
            len: Le32::from(len),
        };

        // This write can't fail as we are guaranteed to be within the used ring.
        self.mem
            .write_obj_at_addr_volatile(elem, used_elem)
            .unwrap();

        self.next_used += Wrapping(1);
        self.set_used_index(self.next_used);
    }

    /// Returns if the queue should have an interrupt sent based on its state.
    ///
    /// This function implements `VIRTIO_RING_F_EVENT_IDX`, otherwise known as
    /// interrupt suppression. The virtio spec provides the driver with a field,
    /// `used_event`, which says that once we write that descriptor (or several
    /// in the case of a flurry of `add_used` calls), we should send a
    /// notification. Because the values involved wrap around `u16::MAX`, and to
    /// avoid checking the condition on every `add_used` call, the math is a
    /// little complicated.
    ///
    /// The critical inequality is:
    /// ```text
    ///      (next_used - 1) - used_event < next_used - last_used
    /// ```
    ///
    /// For illustration purposes, we label it as `A < B`, where
    /// `A = (next_used - 1) - used_event`, and `B = next_used - last_used`.
    ///
    /// `A` and `B` represent two distances, measured in a wrapping ring of size
    /// `u16::MAX`. In the "send intr" case, the inequality is true. In the
    /// "don't send intr" case, the inequality is false. We must be very careful
    /// in assigning a direction to the ring, so that when we
    /// graph the subtraction operations, we are measuring the right distance
    /// (similar to how DC circuits are analyzed).
    ///
    /// The two distances are as follows:
    ///  * `A` is the distance between the driver's requested notification point, and the current
    ///    position in the ring.
    ///
    ///  * `B` is the distance between the last time we notified the guest, and the current position
    ///    in the ring.
    ///
    /// If we graph these distances for the situation where we want to notify
    /// the guest, and when we don't want to notify the guest, we see that
    /// `A < B` becomes true the moment `next_used - 1` passes `used_event`. See
    /// the graphs at the bottom of this comment block for a more visual
    /// explanation.
    ///
    /// Once an interrupt is sent, we have a final useful property: `last_used`
    /// moves up to `next_used`, which causes the inequality to be false. Thus, we
    /// won't send notifications again until `used_event` is moved forward by
    /// the driver.
    ///
    /// Finally, let's talk about a couple of ways to write this inequality
    /// that don't work, and critically, explain *why*.
    ///
    /// First, a naive reading of the virtio spec might lead us to ask: why not
    /// just use the following inequality:
    /// ```text
    ///      next_used - 1 >= used_event
    /// ```
    ///
    /// because that's much simpler, right? The trouble is that the ring wraps,
    /// so it could be that a smaller index is actually ahead of a larger one.
    /// That's why we have to use distances in the ring instead.
    ///
    /// Second, one might look at the correct inequality:
    /// ```text
    ///      (next_used - 1) - used_event < next_used - last_used
    /// ```
    ///
    /// And try to simplify it to:
    /// ```text
    ///      last_used - 1 < used_event
    /// ```
    ///
    /// Functionally, this won't work because next_used isn't present at all
    /// anymore. (Notifications will never be sent.) But why is that? The algebra
    /// here *appears* to work out, but all semantic meaning is lost. There are
    /// two explanations for why this happens:
    /// * The intuitive one: the terms in the inequality are not actually separable; in other words,
    ///   (next_used - last_used) is an inseparable term, so subtracting next_used from both sides
    ///   of the original inequality and zeroing them out is semantically invalid. But why aren't
    ///   they separable? See below.
    /// * The theoretical one: canceling like terms relies on a vector space law: a + x = b + x =>
    ///   a = b (cancellation law). For congruences / equality under modulo, this law is
    ///   satisfied, but for inequalities under mod, it is not; therefore, we cannot cancel like
    ///   terms.
    ///
    /// ```text
    /// ┌──────────────────────────────────┐
    /// │                                  │
    /// │                                  │
    /// │                                  │
    /// │           ┌────────────  next_used - 1
    /// │           │A                   x
    /// │           │       ┌────────────x────────────┐
    /// │           │       │            x            │
    /// │           │       │                         │
    /// │           │       │               │         │
    /// │           │       │               │         │
    /// │     used_event  xxxx        + ◄───┘       xxxxx last_used
    /// │                   │                         │      │
    /// │                   │        Send intr        │      │
    /// │                   │                         │      │
    /// │                   └─────────────────────────┘      │
    /// │                                                    │
    /// │ B                                                  │
    /// └────────────────────────────────────────────────────┘
    ///
    ///             ┌───────────────────────────────────────────────────┐
    ///             │                                                 A │
    ///             │       ┌────────────────────────┐                  │
    ///             │       │                        │                  │
    ///             │       │                        │                  │
    ///             │       │              │         │                  │
    ///             │       │              │         │                  │
    ///       used_event  xxxx             │       xxxxx last_used      │
    ///                     │        + ◄───┘         │       │          │
    ///                     │                        │       │          │
    ///                     │     Don't send intr    │       │          │
    ///                     │                        │       │          │
    ///                     └───────────x────────────┘       │          │
    ///                                 x                    │          │
    ///                              next_used - 1           │          │
    ///                              │  │                  B │          │
    ///                              │  └────────────────────┘          │
    ///                              │                                  │
    ///                              └──────────────────────────────────┘
    /// ```
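    ///
    /// As a worked example of the wrapping arithmetic (values chosen purely for illustration):
    /// suppose `next_used = 3`, `last_used = 0xFFFE`, and `used_event = 0xFFFF`, all computed
    /// in `Wrapping<u16>` arithmetic:
    /// ```text
    ///      A = (next_used - 1) - used_event = 0x0002 - 0xFFFF = 3 (mod 2^16)
    ///      B = next_used - last_used        = 0x0003 - 0xFFFE = 5 (mod 2^16)
    /// ```
    /// `A < B`, so an interrupt is sent: the device has used five entries since the guest was
    /// last notified, and the driver asked to be notified once the entry at index `0xFFFF` was
    /// used, which has since happened.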
    fn queue_wants_interrupt(&self) -> bool {
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            let used_event = self.get_used_event();
            self.next_used - used_event - Wrapping(1) < self.next_used - self.last_used
        } else {
            !self.get_avail_flag(VIRTQ_AVAIL_F_NO_INTERRUPT)
        }
    }

    /// Inject an interrupt into the guest on this queue.
    ///
    /// Returns `true` if an interrupt was injected into the guest for this queue,
    /// and `false` if no interrupt was needed.
    pub fn trigger_interrupt(&mut self, interrupt: &Interrupt) -> bool {
        if self.queue_wants_interrupt() {
            self.last_used = self.next_used;
            interrupt.signal_used_queue(self.vector);
            true
        } else {
            false
        }
    }
    pub fn snapshot(&self) -> anyhow::Result<serde_json::Value> {
        serde_json::to_value(SplitQueueSnapshot {
            size: self.size,
            vector: self.vector,
            desc_table: self.desc_table,
            avail_ring: self.avail_ring,
            used_ring: self.used_ring,
            next_avail: self.next_avail,
            next_used: self.next_used,
            features: self.features,
            last_used: self.last_used,
        })
        .context("failed to serialize SplitQueueSnapshot")
    }

    pub fn restore(
        queue_value: serde_json::Value,
        mem: &GuestMemory,
        event: Event,
    ) -> anyhow::Result<SplitQueue> {
        let s: SplitQueueSnapshot = serde_json::from_value(queue_value)?;
        let queue = SplitQueue {
            mem: mem.clone(),
            event,
            size: s.size,
            vector: s.vector,
            desc_table: s.desc_table,
            avail_ring: s.avail_ring,
            used_ring: s.used_ring,
            next_avail: s.next_avail,
            next_used: s.next_used,
            features: s.features,
            last_used: s.last_used,
        };
        Ok(queue)
    }
}

#[cfg(test)]
mod tests {
    use std::convert::TryInto;

    use data_model::Le16;
    use data_model::Le32;
    use data_model::Le64;
    use memoffset::offset_of;
    use zerocopy::AsBytes;
    use zerocopy::FromBytes;

    use super::*;
    use crate::virtio::create_descriptor_chain;
    use crate::virtio::Desc;
    use crate::virtio::Interrupt;
    use crate::virtio::Queue;
    use crate::IrqLevelEvent;

    const GUEST_MEMORY_SIZE: u64 = 0x10000;
    const DESC_OFFSET: u64 = 0;
    const AVAIL_OFFSET: u64 = 0x200;
    const USED_OFFSET: u64 = 0x400;
    const QUEUE_SIZE: usize = 0x10;
    const BUFFER_OFFSET: u64 = 0x8000;
    const BUFFER_LEN: u32 = 0x400;

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C)]
    struct Avail {
        flags: Le16,
        idx: Le16,
        ring: [Le16; QUEUE_SIZE],
        used_event: Le16,
    }

    impl Default for Avail {
        fn default() -> Self {
            Avail {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                ring: [Le16::from(0u16); QUEUE_SIZE],
                used_event: Le16::from(0u16),
            }
        }
    }

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C)]
    struct UsedElem {
        id: Le32,
        len: Le32,
    }

    impl Default for UsedElem {
        fn default() -> Self {
            UsedElem {
                id: Le32::from(0u32),
                len: Le32::from(0u32),
            }
        }
    }

    #[derive(Copy, Clone, Debug, FromZeroes, FromBytes, AsBytes)]
    #[repr(C, packed)]
    struct Used {
        flags: Le16,
        idx: Le16,
        used_elem_ring: [UsedElem; QUEUE_SIZE],
        avail_event: Le16,
    }

    impl Default for Used {
        fn default() -> Self {
            Used {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                used_elem_ring: [UsedElem::default(); QUEUE_SIZE],
                avail_event: Le16::from(0u16),
            }
        }
    }

    fn setup_vq(queue: &mut QueueConfig, mem: &GuestMemory) -> Queue {
        let desc = Desc {
            addr: Le64::from(BUFFER_OFFSET),
            len: Le32::from(BUFFER_LEN),
            flags: Le16::from(0u16),
            next: Le16::from(1u16),
        };
        let _ = mem.write_obj_at_addr(desc, GuestAddress(DESC_OFFSET));

        let avail = Avail::default();
        let _ = mem.write_obj_at_addr(avail, GuestAddress(AVAIL_OFFSET));

        let used = Used::default();
        let _ = mem.write_obj_at_addr(used, GuestAddress(USED_OFFSET));

        queue.set_desc_table(GuestAddress(DESC_OFFSET));
        queue.set_avail_ring(GuestAddress(AVAIL_OFFSET));
        queue.set_used_ring(GuestAddress(USED_OFFSET));
        queue.ack_features((1u64) << VIRTIO_RING_F_EVENT_IDX);
        queue.set_ready(true);

        queue
            .activate(mem, Event::new().unwrap())
            .expect("QueueConfig::activate failed")
    }

    fn fake_desc_chain(mem: &GuestMemory) -> DescriptorChain {
        create_descriptor_chain(mem, GuestAddress(0), GuestAddress(0), Vec::new(), 0)
            .expect("failed to create descriptor chain")
    }

    #[test]
    fn queue_event_id_guest_fast() {
        let mut queue =
            QueueConfig::new(QUEUE_SIZE.try_into().unwrap(), 1 << VIRTIO_RING_F_EVENT_IDX);
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        let mut queue = setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            None,
            10,
            #[cfg(target_arch = "x86_64")]
            None,
        );

        // Offset of used_event within the Avail structure.
        let used_event_offset = offset_of!(Avail, used_event) as u64;
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device has handled
        // them, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to 0x100.
        let mut driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device doesn't generate more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // Assume the driver submits another u16::MAX - 0x100 requests to the device and the
        // device has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver has only handled 0x100 interrupts, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to u16::MAX.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device doesn't generate more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // Assume the driver submits one more request and the device has handled it, so
        // self.next_used wraps to 0.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has handled all the previous interrupts, so an
        // interrupt should be injected again.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);

        // The driver handles that interrupt and updates avail.used_event to 0.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device doesn't generate more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);
    }

    #[test]
    fn queue_event_id_guest_slow() {
        let mut queue =
            QueueConfig::new(QUEUE_SIZE.try_into().unwrap(), 1 << VIRTIO_RING_F_EVENT_IDX);
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        let mut queue = setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            IrqLevelEvent::new().unwrap(),
            None,
            10,
            #[cfg(target_arch = "x86_64")]
            None,
        );

        // Offset of used_event within the Avail structure.
        let used_event_offset = offset_of!(Avail, used_event) as u64;
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device has handled
        // them, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);

        // The driver handles part of the interrupts and updates avail.used_event to 0x80.
        let mut driver_handled = Wrapping(0x80);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // Assume the driver submits one more request and the device has handled it, so
        // self.next_used is incremented.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // Assume the driver submits another u16::MAX - 0x101 requests to the device and the
        // device has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // The driver handles most of the interrupts and updates avail.used_event to
        // u16::MAX - 1.
        driver_handled = device_generate - Wrapping(1);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // Assume the driver submits one more request and the device has handled it, so
        // self.next_used wraps to 0.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has already finished the last interrupt (0x100),
        // and the device has serviced other requests, so a new interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);

        // Assume the driver submits one more request and the device has handled it, so
        // self.next_used is incremented to 1.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt
        // (Wrapping(0)) yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // The driver handles all the remaining interrupts and wraps avail.used_event to 0x1.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device doesn't generate more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&interrupt), false);

        // Assume the driver submits one more request and the device has handled it, so
        // self.next_used is increased.
        queue.add_used(fake_desc_chain(&mem), BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has finished all the previous interrupts, so an
        // interrupt should be injected again.
        assert_eq!(queue.trigger_interrupt(&interrupt), true);
    }
}