// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};

use base::error;
use cros_async::{AsyncError, EventAsync};
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{GuestAddress, GuestMemory};

use super::{SignalableInterrupt, VIRTIO_MSI_NO_VECTOR};

const VIRTQ_DESC_F_NEXT: u16 = 0x1;
const VIRTQ_DESC_F_WRITE: u16 = 0x2;
#[allow(dead_code)]
const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

const VIRTQ_USED_F_NO_NOTIFY: u16 = 0x1;
#[allow(dead_code)]
const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 0x1;

/// An iterator over a single descriptor chain.  Not to be confused with `AvailIter`,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter {
    next: Option<DescriptorChain>,
}

impl DescIter {
    /// Returns an iterator that only yields the readable descriptors in the chain.
    pub fn readable(self) -> impl Iterator<Item = DescriptorChain> {
        self.take_while(DescriptorChain::is_read_only)
    }

    /// Returns an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> impl Iterator<Item = DescriptorChain> {
        self.skip_while(DescriptorChain::is_read_only)
    }
}

impl Iterator for DescIter {
    type Item = DescriptorChain;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(current) = self.next.take() {
            self.next = current.next_descriptor();
            Some(current)
        } else {
            None
        }
    }
}

/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain {
    mem: GuestMemory,
    desc_table: GuestAddress,
    queue_size: u16,
    ttl: u16, // used to prevent infinite chain cycles

    /// Index into the descriptor table.
    pub index: u16,

    /// Guest physical address of device specific data.
    pub addr: GuestAddress,

    /// Length of device specific data.
    pub len: u32,

    /// Includes next, write, and indirect bits.
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set.
    pub next: u16,
}

impl DescriptorChain {
    pub(crate) fn checked_new(
        mem: &GuestMemory,
        desc_table: GuestAddress,
        queue_size: u16,
        index: u16,
        required_flags: u16,
    ) -> Option<DescriptorChain> {
        if index >= queue_size {
            return None;
        }

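        // Each descriptor table entry is 16 bytes, laid out per the virtio
        // spec's struct virtq_desc: addr: u64 at offset 0, len: u32 at offset
        // 8, flags: u16 at offset 12, next: u16 at offset 14 (all
        // little-endian), which is what the offsets below decode.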
        let desc_head = mem.checked_offset(desc_table, (index as u64) * 16)?;
        // These reads can't fail unless Guest memory is hopelessly broken.
        let addr = GuestAddress(mem.read_obj_from_addr::<u64>(desc_head).unwrap() as u64);
        mem.checked_offset(desc_head, 16)?;
        let len: u32 = mem.read_obj_from_addr(desc_head.unchecked_add(8)).unwrap();
        let flags: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(12)).unwrap();
        let next: u16 = mem.read_obj_from_addr(desc_head.unchecked_add(14)).unwrap();
        let chain = DescriptorChain {
            mem: mem.clone(),
            desc_table,
            queue_size,
            ttl: queue_size,
            index,
            addr,
            len,
            flags,
            next,
        };

        if chain.is_valid() && chain.flags & required_flags == required_flags {
            Some(chain)
        } else {
            None
        }
    }

    #[allow(clippy::if_same_then_else, clippy::needless_bool)]
    fn is_valid(&self) -> bool {
        if self.len > 0
            && self
                .mem
                .checked_offset(self.addr, self.len as u64 - 1u64)
                .is_none()
        {
            false
        } else if self.has_next() && self.next >= self.queue_size {
            false
        } else {
            true
        }
    }

    /// Returns true if there is another descriptor linked after this one in the chain.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    /// Write only means the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    /// If the driver designated this as a read only descriptor.
    ///
    /// If this is false, this descriptor is write only.
    /// Read only means the emulated device can read and the driver can write.
    pub fn is_read_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE == 0
    }

    /// Gets the next descriptor in this descriptor chain, if there is one.
    ///
    /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
    /// the head of the next _available_ descriptor chain.
    pub fn next_descriptor(&self) -> Option<DescriptorChain> {
        if self.has_next() {
            // Once we see a write-only descriptor, all subsequent descriptors must be write-only.
            let required_flags = self.flags & VIRTQ_DESC_F_WRITE;
            DescriptorChain::checked_new(
                &self.mem,
                self.desc_table,
                self.queue_size,
                self.next,
                required_flags,
            )
            .map(|mut c| {
                c.ttl = self.ttl - 1;
                c
            })
        } else {
            None
        }
    }

    /// Produces an iterator over all the descriptors in this chain.
    pub fn into_iter(self) -> DescIter {
        DescIter { next: Some(self) }
    }
}
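
// A minimal illustrative sketch (not part of the device implementations):
// walk a chain and total the device-readable and device-writable bytes it
// describes. The virtio spec requires all readable descriptors in a chain to
// come before all writable ones, which is why `readable`/`writable` above can
// be implemented with `take_while`/`skip_while`.
#[allow(dead_code)]
fn example_chain_lengths(chain: DescriptorChain) -> (u32, u32) {
    let readable_len: u32 = chain.clone().into_iter().readable().map(|d| d.len).sum();
    let writable_len: u32 = chain.into_iter().writable().map(|d| d.len).sum();
    (readable_len, writable_len)
}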

/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemory,
    queue: &'b mut Queue,
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain;

    fn next(&mut self) -> Option<Self::Item> {
        self.queue.pop(self.mem)
    }
}

/// A virtio queue's parameters.
#[derive(Clone)]
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// MSI-X vector for the queue. Don't care for INTx
    pub vector: u16,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    next_avail: Wrapping<u16>,
    next_used: Wrapping<u16>,

    // Device feature bits accepted by the driver
    features: u64,
    last_used: Wrapping<u16>,

    // Count of notification disables. Users of the queue can disable guest notification while
    // processing requests. This is the count of how many are in flight (there could be several
    // contexts handling requests in parallel). When this count is zero, notifications are
    // re-enabled.
    notification_disable_count: usize,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            features: 0,
            last_used: Wrapping(0),
            notification_disable_count: 0,
        }
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    /// Reset queue to a clean state.
    pub fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.features = 0;
        self.last_used = Wrapping(0);
    }

    /// Returns true if the queue is marked ready and its descriptor table and
    /// rings fit within guest memory.
    pub fn is_valid(&self, mem: &GuestMemory) -> bool {
        let queue_size = self.actual_size() as usize;
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            false
        } else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
        {
            // The virtio spec requires the queue size to be a non-zero power of
            // two, which the `size & (size - 1)` test checks.
            error!("virtio queue with invalid size: {}", self.size);
            false
        } else if desc_table
            .checked_add(desc_table_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue descriptor table goes out of bounds: start:0x{:08x} size:0x{:08x}",
                desc_table.offset(),
                desc_table_size
            );
            false
        } else if avail_ring
            .checked_add(avail_ring_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue available ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                avail_ring.offset(),
                avail_ring_size
            );
            false
        } else if used_ring
            .checked_add(used_ring_size as u64)
            .map_or(true, |v| !mem.address_in_range(v))
        {
            error!(
                "virtio queue used ring goes out of bounds: start:0x{:08x} size:0x{:08x}",
                used_ring.offset(),
                used_ring_size
            );
            false
        } else {
            true
        }
    }

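    // Ring layouts assumed by the offset arithmetic in the accessors below
    // (from the virtio spec; all fields are little-endian):
    //
    //   available ring: flags: u16, idx: u16, ring: [u16; queue_size], used_event: u16
    //   used ring:      flags: u16, idx: u16, ring: [virtq_used_elem; queue_size], avail_event: u16
    //   virtq_used_elem: id: u32, len: u32
    //
    // So `avail_ring + 2` is the available ring's idx, `avail_ring + 4 + 2 * i`
    // is avail ring[i], `used_ring + 4 + 8 * i` is used ring[i], and the event
    // fields sit immediately after each ring.
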
    // Get the index of the first available descriptor chain in the available ring
    // (the next one that the driver will fill).
    //
    // All available ring entries between `self.next_avail` and `get_avail_index()` are available
    // to be processed by the device.
    fn get_avail_index(&self, mem: &GuestMemory) -> Wrapping<u16> {
        let avail_index_addr = self.avail_ring.unchecked_add(2);
        let avail_index: u16 = mem.read_obj_from_addr(avail_index_addr).unwrap();

        // Make sure following reads (e.g. desc_idx) don't pass the avail_index read.
        fence(Ordering::Acquire);

        Wrapping(avail_index)
    }

    // Set the `avail_event` field in the used ring.
    //
    // This allows the device to inform the driver that driver-to-device notification
    // (kicking the ring) is not necessary until the driver reaches the `avail_index` descriptor.
    //
    // This value is only used if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn set_avail_event(&mut self, mem: &GuestMemory, avail_index: Wrapping<u16>) {
        let avail_event_addr = self
            .used_ring
            .unchecked_add(4 + 8 * u64::from(self.actual_size()));
        mem.write_obj_at_addr(avail_index.0, avail_event_addr)
            .unwrap();
    }

    // Query the value of a single-bit flag in the available ring.
    //
    // Returns `true` if `flag` is currently set (by the driver) in the available ring flags.
    #[allow(dead_code)]
    fn get_avail_flag(&self, mem: &GuestMemory, flag: u16) -> bool {
        let avail_flags: u16 = mem.read_obj_from_addr(self.avail_ring).unwrap();
        avail_flags & flag == flag
    }

    // Get the `used_event` field in the available ring.
    //
    // The returned value is the index of the next descriptor chain entry for which the driver
    // needs to be notified upon use.  Entries before this index may be used without notifying
    // the driver.
    //
    // This value is only valid if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn get_used_event(&self, mem: &GuestMemory) -> Wrapping<u16> {
        let used_event_addr = self
            .avail_ring
            .unchecked_add(4 + 2 * u64::from(self.actual_size()));
        let used_event: u16 = mem.read_obj_from_addr(used_event_addr).unwrap();
        Wrapping(used_event)
    }

    // Set the `idx` field in the used ring.
    //
    // This indicates to the driver that all entries up to (but not including) `used_index` have
    // been used by the device and may be processed by the driver.
    fn set_used_index(&mut self, mem: &GuestMemory, used_index: Wrapping<u16>) {
        // This fence ensures all descriptor writes are visible before the index update.
        fence(Ordering::Release);

        let used_index_addr = self.used_ring.unchecked_add(2);
        mem.write_obj_at_addr(used_index.0, used_index_addr)
            .unwrap();
    }

    // Set a single-bit flag in the used ring.
    //
    // Changes the bit specified by the mask in `flag` to `value`.
    fn set_used_flag(&mut self, mem: &GuestMemory, flag: u16, value: bool) {
        let mut used_flags: u16 = mem.read_obj_from_addr(self.used_ring).unwrap();
        if value {
            used_flags |= flag;
        } else {
            used_flags &= !flag;
        }
        mem.write_obj_at_addr(used_flags, self.used_ring).unwrap();
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop_peeked` to remove the returned descriptor chain from the queue.
    pub fn peek(&mut self, mem: &GuestMemory) -> Option<DescriptorChain> {
        if !self.is_valid(mem) {
            return None;
        }

        let queue_size = self.actual_size();
        let avail_index = self.get_avail_index(mem);
        let avail_len = avail_index - self.next_avail;

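        // An `avail_len` larger than the ring itself can only come from a buggy
        // or malicious driver; treat that, like an empty ring, as nothing
        // being available.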
        if avail_len.0 > queue_size || self.next_avail == avail_index {
            return None;
        }

        let desc_idx_addr_offset = 4 + (u64::from(self.next_avail.0 % queue_size) * 2);
        let desc_idx_addr = mem.checked_offset(self.avail_ring, desc_idx_addr_offset)?;

        // This index is checked below in checked_new.
        let descriptor_index: u16 = mem.read_obj_from_addr(desc_idx_addr).unwrap();

        DescriptorChain::checked_new(mem, self.desc_table, queue_size, descriptor_index, 0)
    }

    /// Remove the first available descriptor chain from the queue.
    /// This function should only be called immediately following `peek`.
    pub fn pop_peeked(&mut self, mem: &GuestMemory) {
        self.next_avail += Wrapping(1);
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.set_avail_event(mem, self.next_avail);
        }
    }

    /// If a new descriptor chain is available, returns its head and removes it from the queue.
    pub fn pop(&mut self, mem: &GuestMemory) -> Option<DescriptorChain> {
        let descriptor_chain = self.peek(mem);
        if descriptor_chain.is_some() {
            self.pop_peeked(mem);
        }
        descriptor_chain
    }

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemory) -> AvailIter<'a, 'b> {
        AvailIter { mem, queue: self }
    }

    /// Asynchronously read the next descriptor chain from the queue.
    /// Returns a `DescriptorChain` when it is `await`ed.
    pub async fn next_async(
        &mut self,
        mem: &GuestMemory,
        eventfd: &mut EventAsync,
    ) -> std::result::Result<DescriptorChain, AsyncError> {
        loop {
            // Check if there are more descriptors available.
            if let Some(chain) = self.pop(mem) {
                return Ok(chain);
            }
            eventfd.next_val().await?;
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemory, desc_index: u16, len: u32) {
        if desc_index >= self.actual_size() {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return;
        }

        let used_ring = self.used_ring;
        let next_used = (self.next_used.0 % self.actual_size()) as usize;
        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        mem.write_obj_at_addr(desc_index as u32, used_elem).unwrap();
        mem.write_obj_at_addr(len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used += Wrapping(1);
        self.set_used_index(mem, self.next_used);
    }

    /// Updates the index at which the driver should signal the device next.
    pub fn update_int_required(&mut self, mem: &GuestMemory) {
        self.set_avail_event(mem, self.get_avail_index(mem));
    }

    /// Enables or disables the guest notifying the device that requests are
    /// available on the descriptor chain.
    pub fn set_notify(&mut self, mem: &GuestMemory, enable: bool) {
        if enable {
            self.notification_disable_count -= 1;
        } else {
            self.notification_disable_count += 1;
        }

        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.update_int_required(mem);
        } else {
            self.set_used_flag(
                mem,
                VIRTQ_USED_F_NO_NOTIFY,
                self.notification_disable_count > 0,
            );
        }
    }

    // Check whether the guest has interrupt injection enabled for this queue.
    fn available_interrupt_enabled(&self, mem: &GuestMemory) -> bool {
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            let used_event = self.get_used_event(mem);
            // If used_event >= self.last_used, the driver is handling interrupts quickly
            // enough, so a new interrupt can be injected.
            // If used_event < self.last_used, the driver hasn't finished handling the last
            // interrupt, so there is no need to inject a new one.
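            //
            // Worked example: with next_used == 0x102, last_used == 0x100, and
            // used_event == 0x100, the left-hand side below is
            // 0x102 - 0x100 - 1 = 1 and the right-hand side is 0x102 - 0x100 = 2,
            // so 1 < 2 holds and an interrupt is injected. With a lagging driver
            // at used_event == 0xff, the left-hand side becomes 2 and the
            // comparison fails. Wrapping arithmetic keeps the comparison valid
            // across u16 overflow.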
            self.next_used - used_event - Wrapping(1) < self.next_used - self.last_used
        } else {
            // TODO(b/172975852): This branch should check the flag that requests interrupt
            // suppression:
            // ```
            // !self.get_avail_flag(mem, VIRTQ_AVAIL_F_NO_INTERRUPT)
            // ```
            // Re-enable the flag check once the missing interrupt issue is debugged.
            true
        }
    }

    /// Injects an interrupt into the guest on this queue.
    ///
    /// Returns true if the interrupt was injected and false if it was not
    /// (because the driver has interrupts suppressed).
    pub fn trigger_interrupt(
        &mut self,
        mem: &GuestMemory,
        interrupt: &dyn SignalableInterrupt,
    ) -> bool {
        if self.available_interrupt_enabled(mem) {
            self.last_used = self.next_used;
            interrupt.signal_used_queue(self.vector);
            true
        } else {
            false
        }
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    pub fn ack_features(&mut self, features: u64) {
        self.features |= features;
    }
}
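
// The sketches below are illustrative only and are not used by any device
// implementation; they show the typical calling pattern for `Queue` under the
// assumption that device-specific request handling happens elsewhere.

// Drain every available descriptor chain, completing each one with a
// zero-length used entry. A real device would service the request between
// `pop` and `add_used` and report the number of bytes written back to the
// guest instead of 0.
#[allow(dead_code)]
fn example_drain_queue(
    queue: &mut Queue,
    mem: &GuestMemory,
    interrupt: &dyn SignalableInterrupt,
) {
    // Suppress guest kicks while the batch is processed; `set_notify` keeps a
    // disable count, so every `false` must be balanced by a later `true`.
    queue.set_notify(mem, false);
    while let Some(chain) = queue.pop(mem) {
        let index = chain.index;
        // Device-specific request handling would go here.
        queue.add_used(mem, index, 0);
        // Only signals the guest if the driver currently wants an interrupt.
        queue.trigger_interrupt(mem, interrupt);
    }
    queue.set_notify(mem, true);
}

// The same loop in async form: `next_async` parks on the queue's kick event
// until the driver publishes more descriptors.
#[allow(dead_code)]
async fn example_drain_queue_async(
    queue: &mut Queue,
    mem: &GuestMemory,
    kick_evt: &mut EventAsync,
    interrupt: &dyn SignalableInterrupt,
) {
    loop {
        match queue.next_async(mem, kick_evt).await {
            Ok(chain) => {
                let index = chain.index;
                queue.add_used(mem, index, 0);
                queue.trigger_interrupt(mem, interrupt);
            }
            Err(e) => {
                error!("failed to read kick event: {}", e);
                return;
            }
        }
    }
}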

#[cfg(test)]
mod tests {
    use super::super::Interrupt;
    use super::*;
    use base::Event;
    use data_model::{DataInit, Le16, Le32, Le64};
    use std::convert::TryInto;
    use std::sync::atomic::AtomicUsize;
    use std::sync::Arc;

    const GUEST_MEMORY_SIZE: u64 = 0x10000;
    const DESC_OFFSET: u64 = 0;
    const AVAIL_OFFSET: u64 = 0x200;
    const USED_OFFSET: u64 = 0x400;
    const QUEUE_SIZE: usize = 0x10;
    const BUFFER_OFFSET: u64 = 0x8000;
    const BUFFER_LEN: u32 = 0x400;

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct Desc {
        addr: Le64,
        len: Le32,
        flags: Le16,
        next: Le16,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for Desc {}

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct Avail {
        flags: Le16,
        idx: Le16,
        ring: [Le16; QUEUE_SIZE],
        used_event: Le16,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for Avail {}
    impl Default for Avail {
        fn default() -> Self {
            Avail {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                ring: [Le16::from(0u16); QUEUE_SIZE],
                used_event: Le16::from(0u16),
            }
        }
    }

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct UsedElem {
        id: Le32,
        len: Le32,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for UsedElem {}
    impl Default for UsedElem {
        fn default() -> Self {
            UsedElem {
                id: Le32::from(0u32),
                len: Le32::from(0u32),
            }
        }
    }

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct Used {
        flags: Le16,
        idx: Le16,
        used_elem_ring: [UsedElem; QUEUE_SIZE],
        avail_event: Le16,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for Used {}
    impl Default for Used {
        fn default() -> Self {
            Used {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                used_elem_ring: [UsedElem::default(); QUEUE_SIZE],
                avail_event: Le16::from(0u16),
            }
        }
    }

    fn setup_vq(queue: &mut Queue, mem: &GuestMemory) {
        let desc = Desc {
            addr: Le64::from(BUFFER_OFFSET),
            len: Le32::from(BUFFER_LEN),
            flags: Le16::from(0u16),
            next: Le16::from(1u16),
        };
        let _ = mem.write_obj_at_addr(desc, GuestAddress(DESC_OFFSET));

        let avail = Avail::default();
        let _ = mem.write_obj_at_addr(avail, GuestAddress(AVAIL_OFFSET));

        let used = Used::default();
        let _ = mem.write_obj_at_addr(used, GuestAddress(USED_OFFSET));

        queue.desc_table = GuestAddress(DESC_OFFSET);
        queue.avail_ring = GuestAddress(AVAIL_OFFSET);
        queue.used_ring = GuestAddress(USED_OFFSET);
        queue.ack_features((1u64) << VIRTIO_RING_F_EVENT_IDX);
    }

    #[test]
    fn queue_event_id_guest_fast() {
        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&vec![(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            Arc::new(AtomicUsize::new(0)),
            Event::new().unwrap(),
            Event::new().unwrap(),
            None,
            10,
        );

        // Calculate the offset of the used_event field within the Avail structure.
        let used_event_offset: u64 =
            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device
        // has handled them all, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to 0x100.
        let mut driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits another u16::MAX - 0x100 requests to the
        // device and the device has handled all of them, so self.next_used is
        // increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver has only handled 0x100 interrupts, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to u16::MAX.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled
        // it, wrapping self.next_used around to 0.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has handled all the previous interrupts, so
        // an interrupt should be injected again.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles that interrupt and updates avail.used_event to 0.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
    }

    #[test]
    fn queue_event_id_guest_slow() {
        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&vec![(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            Arc::new(AtomicUsize::new(0)),
            Event::new().unwrap(),
            Event::new().unwrap(),
            None,
            10,
        );

        // Calculate the offset of the used_event field within the Avail structure.
        let used_event_offset: u64 =
            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device
        // has handled them all, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so an
        // interrupt should be injected.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles part of the interrupts and updates avail.used_event to 0x80.
        let mut driver_handled = Wrapping(0x80);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver hasn't finished handling the last interrupt
        // yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled
        // it, so self.next_used is incremented.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt
        // yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits another u16::MAX - 0x101 requests to the
        // device and the device has handled all of them, so self.next_used is
        // increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver hasn't finished handling the last interrupt
        // yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // The driver handles most of the interrupts and updates avail.used_event
        // to u16::MAX - 1.
        driver_handled = device_generate - Wrapping(1);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // Assume the driver submits one more request and the device has handled
        // it, wrapping self.next_used around to 0.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has already finished handling the last
        // interrupt (0x100), and the device has serviced another request, so a
        // new interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // Assume the driver submits one more request and the device has handled
        // it, so self.next_used is incremented to 1.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt
        // (Wrapping(0)) yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // The driver handles all the remaining interrupts, wrapping
        // avail.used_event around to 0x1.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts, and the
        // device hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled
        // it, so self.next_used is incremented.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has finished all the previous interrupts, so
        // an interrupt should be injected again.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
    }
}