// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cmp::min;
use std::convert::TryInto;
use std::num::Wrapping;
use std::sync::atomic::{fence, Ordering};
use std::sync::Arc;
use sync::Mutex;

use anyhow::{bail, Context};
use base::error;
use cros_async::{AsyncError, EventAsync};
use data_model::{DataInit, Le16, Le32, Le64};
use virtio_sys::virtio_ring::VIRTIO_RING_F_EVENT_IDX;
use vm_memory::{GuestAddress, GuestMemory};

use super::{SignalableInterrupt, VIRTIO_MSI_NO_VECTOR};
use crate::virtio::ipc_memory_mapper::IpcMemoryMapper;
use crate::virtio::memory_mapper::{MemRegion, Permission, Translate};
use crate::virtio::memory_util::{
    is_valid_wrapper, read_obj_from_addr_wrapper, write_obj_at_addr_wrapper,
};

const VIRTQ_DESC_F_NEXT: u16 = 0x1;
const VIRTQ_DESC_F_WRITE: u16 = 0x2;
#[allow(dead_code)]
const VIRTQ_DESC_F_INDIRECT: u16 = 0x4;

const VIRTQ_USED_F_NO_NOTIFY: u16 = 0x1;
#[allow(dead_code)]
const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 0x1;

/// An iterator over a single descriptor chain.  Not to be confused with AvailIter,
/// which iterates over the descriptor chain heads in a queue.
pub struct DescIter {
    next: Option<DescriptorChain>,
}

impl DescIter {
    /// Returns an iterator that only yields the readable descriptors in the chain.
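    ///
    /// This relies on the virtio layout rule that all device-readable descriptors in a
    /// chain come before any device-writable ones, which is why `readable` can use
    /// `take_while` and `writable` can use `skip_while`.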
    pub fn readable(self) -> impl Iterator<Item = DescriptorChain> {
        self.take_while(DescriptorChain::is_read_only)
    }

    /// Returns an iterator that only yields the writable descriptors in the chain.
    pub fn writable(self) -> impl Iterator<Item = DescriptorChain> {
        self.skip_while(DescriptorChain::is_read_only)
    }
}

impl Iterator for DescIter {
    type Item = DescriptorChain;

    fn next(&mut self) -> Option<Self::Item> {
        if let Some(current) = self.next.take() {
            self.next = current.next_descriptor();
            Some(current)
        } else {
            None
        }
    }
}

/// A virtio descriptor chain.
#[derive(Clone)]
pub struct DescriptorChain {
    mem: GuestMemory,
    desc_table: GuestAddress,
    queue_size: u16,
    ttl: u16, // used to prevent infinite chain cycles

    /// Index into the descriptor table
    pub index: u16,

    /// Guest physical address of device specific data, or IO virtual address
    /// if iommu is used
    pub addr: GuestAddress,

    /// Length of device specific data
    pub len: u32,

    /// Includes next, write, and indirect bits
    pub flags: u16,

    /// Index into the descriptor table of the next descriptor if flags has
    /// the next bit set
    pub next: u16,

    /// Translates `addr` to guest physical address
    iommu: Option<Arc<Mutex<IpcMemoryMapper>>>,
}

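/// A single descriptor table entry, matching the split-ring `struct virtq_desc` layout
/// from the virtio spec: a 64-bit buffer address, 32-bit length, 16-bit flags, and a
/// 16-bit index of the next descriptor in the chain.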
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct Desc {
    pub addr: Le64,
    pub len: Le32,
    pub flags: Le16,
    pub next: Le16,
}
// Safe because it only has data and has no implicit padding.
unsafe impl DataInit for Desc {}

impl DescriptorChain {
    pub(crate) fn checked_new(
        mem: &GuestMemory,
        desc_table: GuestAddress,
        queue_size: u16,
        index: u16,
        required_flags: u16,
        iommu: Option<Arc<Mutex<IpcMemoryMapper>>>,
    ) -> anyhow::Result<DescriptorChain> {
        if index >= queue_size {
            bail!("index ({}) >= queue_size ({})", index, queue_size);
        }

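        // Each descriptor table entry (`Desc`) is 16 bytes, so the descriptor for
        // `index` starts 16 * index bytes past the start of the table.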
        let desc_head = desc_table
            .checked_add((index as u64) * 16)
            .context("integer overflow")?;
        let desc: Desc = read_obj_from_addr_wrapper::<Desc>(mem, &iommu, desc_head)
            .context("failed to read desc")?;
        let chain = DescriptorChain {
            mem: mem.clone(),
            desc_table,
            queue_size,
            ttl: queue_size,
            index,
            addr: GuestAddress(desc.addr.into()),
            len: desc.len.into(),
            flags: desc.flags.into(),
            next: desc.next.into(),
            iommu,
        };

        if chain.is_valid() && chain.flags & required_flags == required_flags {
            Ok(chain)
        } else {
            bail!("chain is invalid")
        }
    }

    /// Get the memory region(s) backing this descriptor, regardless of whether `addr`
    /// holds a gpa (guest physical address) or an iova (IO virtual address) that must
    /// be translated through the iommu.
    pub fn get_mem_regions(&self) -> anyhow::Result<Vec<MemRegion>> {
        if let Some(iommu) = &self.iommu {
            iommu
                .lock()
                .translate(self.addr.offset(), self.len as u64)
                .context("failed to get mem regions")
        } else {
            Ok(vec![MemRegion {
                gpa: self.addr,
                len: self.len.try_into().expect("u32 doesn't fit in usize"),
                perm: Permission::RW,
            }])
        }
    }

    fn is_valid(&self) -> bool {
        if self.len > 0 {
            match self.get_mem_regions() {
                Ok(regions) => {
                    // Each region in `regions` must be a contiguous range in `self.mem`.
                    if !regions
                        .iter()
                        .all(|r| self.mem.is_valid_range(r.gpa, r.len as u64))
                    {
                        return false;
                    }
                }
                Err(e) => {
                    error!("{:#}", e);
                    return false;
                }
            }
        }

        !self.has_next() || self.next < self.queue_size
    }

    /// Returns true if this descriptor has another descriptor linked after it in the chain.
    pub fn has_next(&self) -> bool {
        self.flags & VIRTQ_DESC_F_NEXT != 0 && self.ttl > 1
    }

    /// If the driver designated this as a write only descriptor.
    ///
    /// If this is false, this descriptor is read only.
    /// Write only means the emulated device can write and the driver can read.
    pub fn is_write_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE != 0
    }

    /// If the driver designated this as a read only descriptor.
    ///
    /// If this is false, this descriptor is write only.
    /// Read only means the emulated device can read and the driver can write.
    pub fn is_read_only(&self) -> bool {
        self.flags & VIRTQ_DESC_F_WRITE == 0
    }

200 
201     /// Gets the next descriptor in this descriptor chain, if there is one.
202     ///
203     /// Note that this is distinct from the next descriptor chain returned by `AvailIter`, which is
204     /// the head of the next _available_ descriptor chain.
next_descriptor(&self) -> Option<DescriptorChain>205     pub fn next_descriptor(&self) -> Option<DescriptorChain> {
206         if self.has_next() {
207             // Once we see a write-only descriptor, all subsequent descriptors must be write-only.
208             let required_flags = self.flags & VIRTQ_DESC_F_WRITE;
209             let iommu = self.iommu.as_ref().map(Arc::clone);
210             match DescriptorChain::checked_new(
211                 &self.mem,
212                 self.desc_table,
213                 self.queue_size,
214                 self.next,
215                 required_flags,
216                 iommu,
217             ) {
218                 Ok(mut c) => {
219                     c.ttl = self.ttl - 1;
220                     Some(c)
221                 }
222                 Err(e) => {
223                     error!("{:#}", e);
224                     None
225                 }
226             }
227         } else {
228             None
229         }
230     }
231 
232     /// Produces an iterator over all the descriptors in this chain.
into_iter(self) -> DescIter233     pub fn into_iter(self) -> DescIter {
234         DescIter { next: Some(self) }
235     }
236 }
237 
/// Consuming iterator over all available descriptor chain heads in the queue.
pub struct AvailIter<'a, 'b> {
    mem: &'a GuestMemory,
    queue: &'b mut Queue,
}

impl<'a, 'b> Iterator for AvailIter<'a, 'b> {
    type Item = DescriptorChain;

    fn next(&mut self) -> Option<Self::Item> {
        self.queue.pop(self.mem)
    }
}

#[derive(Clone)]
/// A virtio queue's parameters.
pub struct Queue {
    /// The maximal size in elements offered by the device
    pub max_size: u16,

    /// The queue size in elements the driver selected
    pub size: u16,

    /// Indicates if the queue is finished with configuration
    pub ready: bool,

    /// MSI-X vector for the queue. Don't care for INTx
    pub vector: u16,

    /// Guest physical address of the descriptor table
    pub desc_table: GuestAddress,

    /// Guest physical address of the available ring
    pub avail_ring: GuestAddress,

    /// Guest physical address of the used ring
    pub used_ring: GuestAddress,

    pub next_avail: Wrapping<u16>,
    pub next_used: Wrapping<u16>,

    // Device feature bits accepted by the driver
    features: u64,
    last_used: Wrapping<u16>,

    // Count of notification disables. Users of the queue can disable guest notification while
    // processing requests. This is the count of how many are in flight (there could be several
    // contexts handling requests in parallel). When this count is zero, notifications are
    // re-enabled.
    notification_disable_count: usize,

    iommu: Option<Arc<Mutex<IpcMemoryMapper>>>,
}

impl Queue {
    /// Constructs an empty virtio queue with the given `max_size`.
    pub fn new(max_size: u16) -> Queue {
        Queue {
            max_size,
            size: max_size,
            ready: false,
            vector: VIRTIO_MSI_NO_VECTOR,
            desc_table: GuestAddress(0),
            avail_ring: GuestAddress(0),
            used_ring: GuestAddress(0),
            next_avail: Wrapping(0),
            next_used: Wrapping(0),
            features: 0,
            last_used: Wrapping(0),
            notification_disable_count: 0,
            iommu: None,
        }
    }

    /// Return the actual size of the queue, as the driver may not set up a
    /// queue as big as the device allows.
    pub fn actual_size(&self) -> u16 {
        min(self.size, self.max_size)
    }

    /// Reset queue to a clean state
    pub fn reset(&mut self) {
        self.ready = false;
        self.size = self.max_size;
        self.vector = VIRTIO_MSI_NO_VECTOR;
        self.desc_table = GuestAddress(0);
        self.avail_ring = GuestAddress(0);
        self.used_ring = GuestAddress(0);
        self.next_avail = Wrapping(0);
        self.next_used = Wrapping(0);
        self.features = 0;
        self.last_used = Wrapping(0);
    }

    pub fn is_valid(&self, mem: &GuestMemory) -> bool {
        let queue_size = self.actual_size() as usize;
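        // Split virtqueue layout sizes: the descriptor table is 16 bytes per entry; the
        // available ring is flags (2) + idx (2) + one le16 per entry + used_event (2); the
        // used ring is flags (2) + idx (2) + one 8-byte used element per entry + avail_event (2).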
        let desc_table = self.desc_table;
        let desc_table_size = 16 * queue_size;
        let avail_ring = self.avail_ring;
        let avail_ring_size = 6 + 2 * queue_size;
        let used_ring = self.used_ring;
        let used_ring_size = 6 + 8 * queue_size;
        if !self.ready {
            error!("attempt to use virtio queue that is not marked ready");
            return false;
        } else if self.size > self.max_size || self.size == 0 || (self.size & (self.size - 1)) != 0
        {
            error!("virtio queue with invalid size: {}", self.size);
            return false;
        }

        let iommu = self.iommu.as_ref().map(|i| i.lock());
        for (addr, size, name) in [
            (desc_table, desc_table_size, "descriptor table"),
            (avail_ring, avail_ring_size, "available ring"),
            (used_ring, used_ring_size, "used ring"),
        ] {
            match is_valid_wrapper(mem, &iommu, addr, size as u64) {
                Ok(valid) => {
                    if !valid {
                        error!(
                            "virtio queue {} goes out of bounds: start:0x{:08x} size:0x{:08x}",
                            name,
                            addr.offset(),
                            size,
                        );
                        return false;
                    }
                }
                Err(e) => {
                    error!("is_valid failed: {:#}", e);
                    return false;
                }
            }
        }
        true
    }

    // Get the index of the first available descriptor chain in the available ring
    // (the next one that the driver will fill).
    //
    // All available ring entries between `self.next_avail` and `get_avail_index()` are available
    // to be processed by the device.
    fn get_avail_index(&self, mem: &GuestMemory) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

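        // The 16-bit `idx` field sits at offset 2 in the available ring, just past the
        // 16-bit `flags` field.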
        let avail_index_addr = self.avail_ring.unchecked_add(2);
        let avail_index: u16 =
            read_obj_from_addr_wrapper(mem, &self.iommu, avail_index_addr).unwrap();

        Wrapping(avail_index)
    }

    // Set the `avail_event` field in the used ring.
    //
    // This allows the device to inform the driver that driver-to-device notification
    // (kicking the ring) is not necessary until the driver reaches the `avail_index` descriptor.
    //
    // This value is only used if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn set_avail_event(&mut self, mem: &GuestMemory, avail_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

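        // `avail_event` lives at the end of the used ring: past the 4-byte header
        // (`flags` + `idx`) and the ring of 8-byte used elements.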
        let avail_event_addr = self
            .used_ring
            .unchecked_add(4 + 8 * u64::from(self.actual_size()));
        write_obj_at_addr_wrapper(mem, &self.iommu, avail_index.0, avail_event_addr).unwrap();
    }

    // Query the value of a single-bit flag in the available ring.
    //
    // Returns `true` if `flag` is currently set (by the driver) in the available ring flags.
    fn get_avail_flag(&self, mem: &GuestMemory, flag: u16) -> bool {
        fence(Ordering::SeqCst);

        let avail_flags: u16 =
            read_obj_from_addr_wrapper(mem, &self.iommu, self.avail_ring).unwrap();

        avail_flags & flag == flag
    }

    // Get the `used_event` field in the available ring.
    //
    // The returned value is the index of the next descriptor chain entry for which the driver
    // needs to be notified upon use.  Entries before this index may be used without notifying
    // the driver.
    //
    // This value is only valid if the `VIRTIO_F_EVENT_IDX` feature has been negotiated.
    fn get_used_event(&self, mem: &GuestMemory) -> Wrapping<u16> {
        fence(Ordering::SeqCst);

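        // `used_event` lives at the end of the available ring: past the 4-byte header
        // (`flags` + `idx`) and the ring of 2-byte descriptor indices.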
        let used_event_addr = self
            .avail_ring
            .unchecked_add(4 + 2 * u64::from(self.actual_size()));
        let used_event: u16 =
            read_obj_from_addr_wrapper(mem, &self.iommu, used_event_addr).unwrap();

        Wrapping(used_event)
    }

    // Set the `idx` field in the used ring.
    //
    // This indicates to the driver that all entries up to (but not including) `used_index` have
    // been used by the device and may be processed by the driver.
    fn set_used_index(&mut self, mem: &GuestMemory, used_index: Wrapping<u16>) {
        fence(Ordering::SeqCst);

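        // The 16-bit `idx` field sits at offset 2 in the used ring, just past the
        // 16-bit `flags` field.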
        let used_index_addr = self.used_ring.unchecked_add(2);
        write_obj_at_addr_wrapper(mem, &self.iommu, used_index.0, used_index_addr).unwrap();
    }

    // Set a single-bit flag in the used ring.
    //
    // Changes the bit specified by the mask in `flag` to `value`.
    fn set_used_flag(&mut self, mem: &GuestMemory, flag: u16, value: bool) {
        fence(Ordering::SeqCst);

        let mut used_flags: u16 =
            read_obj_from_addr_wrapper(mem, &self.iommu, self.used_ring).unwrap();
        if value {
            used_flags |= flag;
        } else {
            used_flags &= !flag;
        }
        write_obj_at_addr_wrapper(mem, &self.iommu, used_flags, self.used_ring).unwrap();
    }

    /// Get the first available descriptor chain without removing it from the queue.
    /// Call `pop_peeked` to remove the returned descriptor chain from the queue.
    pub fn peek(&mut self, mem: &GuestMemory) -> Option<DescriptorChain> {
        if !self.is_valid(mem) {
            return None;
        }

        let queue_size = self.actual_size();
        let avail_index = self.get_avail_index(mem);
        let avail_len = avail_index - self.next_avail;

        if avail_len.0 > queue_size || self.next_avail == avail_index {
            return None;
        }

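        // Descriptor chain head indices are stored in the available ring starting at
        // offset 4 (past `flags` and `idx`), 2 bytes per entry, indexed modulo the queue size.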
        let desc_idx_addr_offset = 4 + (u64::from(self.next_avail.0 % queue_size) * 2);
        let desc_idx_addr = self.avail_ring.checked_add(desc_idx_addr_offset)?;

        // This index is checked below in checked_new.
        let descriptor_index: u16 =
            read_obj_from_addr_wrapper(mem, &self.iommu, desc_idx_addr).unwrap();

        let iommu = self.iommu.as_ref().map(Arc::clone);
        DescriptorChain::checked_new(mem, self.desc_table, queue_size, descriptor_index, 0, iommu)
            .map_err(|e| {
                error!("{:#}", e);
                e
            })
            .ok()
    }

    /// Remove the first available descriptor chain from the queue.
    /// This function should only be called immediately following `peek`.
    pub fn pop_peeked(&mut self, mem: &GuestMemory) {
        self.next_avail += Wrapping(1);
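        // When VIRTIO_RING_F_EVENT_IDX has been negotiated, advance `avail_event` so the
        // driver only needs to notify the device once it adds descriptors beyond the ones
        // the device has already seen.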
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            self.set_avail_event(mem, self.next_avail);
        }
    }

    /// If a new descriptor chain head is available, returns it and removes it from the queue.
    pub fn pop(&mut self, mem: &GuestMemory) -> Option<DescriptorChain> {
        let descriptor_chain = self.peek(mem);
        if descriptor_chain.is_some() {
            self.pop_peeked(mem);
        }
        descriptor_chain
    }

    /// A consuming iterator over all available descriptor chain heads offered by the driver.
    pub fn iter<'a, 'b>(&'b mut self, mem: &'a GuestMemory) -> AvailIter<'a, 'b> {
        AvailIter { mem, queue: self }
    }

    /// Asynchronously read the next descriptor chain from the queue.
    /// Returns a `DescriptorChain` when it is `await`ed.
    pub async fn next_async(
        &mut self,
        mem: &GuestMemory,
        eventfd: &mut EventAsync,
    ) -> std::result::Result<DescriptorChain, AsyncError> {
        loop {
            // Check if there are more descriptors available.
            if let Some(chain) = self.pop(mem) {
                return Ok(chain);
            }
            eventfd.next_val().await?;
        }
    }

    /// Puts an available descriptor head into the used ring for use by the guest.
    pub fn add_used(&mut self, mem: &GuestMemory, desc_index: u16, len: u32) {
        if desc_index >= self.actual_size() {
            error!(
                "attempted to add out of bounds descriptor to used ring: {}",
                desc_index
            );
            return;
        }

        let used_ring = self.used_ring;
        let next_used = (self.next_used.0 % self.actual_size()) as usize;
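        // Each used ring element is 8 bytes (le32 descriptor id + le32 length), and the
        // element array starts at offset 4, past the `flags` and `idx` fields.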
        let used_elem = used_ring.unchecked_add((4 + next_used * 8) as u64);

        // These writes can't fail as we are guaranteed to be within the descriptor ring.
        write_obj_at_addr_wrapper(mem, &self.iommu, desc_index as u32, used_elem).unwrap();
        write_obj_at_addr_wrapper(mem, &self.iommu, len as u32, used_elem.unchecked_add(4))
            .unwrap();

        self.next_used += Wrapping(1);
        self.set_used_index(mem, self.next_used);
    }

    /// Enable / disable driver-to-device notifications, i.e. whether the guest should
    /// notify the device that requests are available on the descriptor chain.
    pub fn set_notify(&mut self, mem: &GuestMemory, enable: bool) {
        if enable {
            self.notification_disable_count -= 1;
        } else {
            self.notification_disable_count += 1;
        }

        // We should only set VIRTQ_USED_F_NO_NOTIFY when the VIRTIO_RING_F_EVENT_IDX feature has
        // not been negotiated.
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) == 0 {
            self.set_used_flag(
                mem,
                VIRTQ_USED_F_NO_NOTIFY,
                self.notification_disable_count > 0,
            );
        }
    }

    // Check whether the driver has enabled interrupt injection for this queue.
    fn available_interrupt_enabled(&self, mem: &GuestMemory) -> bool {
        if self.features & ((1u64) << VIRTIO_RING_F_EVENT_IDX) != 0 {
            let used_event = self.get_used_event(mem);
            // If used_event >= self.last_used, the driver is handling interrupts quickly
            // enough, so a new interrupt can be injected.
            // If used_event < self.last_used, the driver hasn't finished handling the last
            // interrupt, so there is no need to inject a new one.
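            // This is the `vring_need_event` check from the virtio spec, evaluated with
            // wrapping u16 arithmetic so it remains correct across index wrap-around.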
            self.next_used - used_event - Wrapping(1) < self.next_used - self.last_used
        } else {
            !self.get_avail_flag(mem, VIRTQ_AVAIL_F_NO_INTERRUPT)
        }
    }

    /// Inject an interrupt into the guest on this queue.
    /// Returns true if the interrupt was injected into the guest for this queue,
    /// and false if it wasn't.
    pub fn trigger_interrupt(
        &mut self,
        mem: &GuestMemory,
        interrupt: &dyn SignalableInterrupt,
    ) -> bool {
        if self.available_interrupt_enabled(mem) {
            self.last_used = self.next_used;
            interrupt.signal_used_queue(self.vector);
            true
        } else {
            false
        }
    }

    /// Acknowledges that this set of features should be enabled on this queue.
    pub fn ack_features(&mut self, features: u64) {
        self.features |= features;
    }

    pub fn set_iommu(&mut self, iommu: Arc<Mutex<IpcMemoryMapper>>) {
        self.iommu = Some(iommu);
    }
}

#[cfg(test)]
mod tests {
    use super::super::Interrupt;
    use super::*;
    use crate::IrqLevelEvent;
    use std::convert::TryInto;
    use std::sync::atomic::AtomicUsize;
    use std::sync::Arc;

    const GUEST_MEMORY_SIZE: u64 = 0x10000;
    const DESC_OFFSET: u64 = 0;
    const AVAIL_OFFSET: u64 = 0x200;
    const USED_OFFSET: u64 = 0x400;
    const QUEUE_SIZE: usize = 0x10;
    const BUFFER_OFFSET: u64 = 0x8000;
    const BUFFER_LEN: u32 = 0x400;

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct Avail {
        flags: Le16,
        idx: Le16,
        ring: [Le16; QUEUE_SIZE],
        used_event: Le16,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for Avail {}
    impl Default for Avail {
        fn default() -> Self {
            Avail {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                ring: [Le16::from(0u16); QUEUE_SIZE],
                used_event: Le16::from(0u16),
            }
        }
    }

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct UsedElem {
        id: Le32,
        len: Le32,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for UsedElem {}
    impl Default for UsedElem {
        fn default() -> Self {
            UsedElem {
                id: Le32::from(0u32),
                len: Le32::from(0u32),
            }
        }
    }

    #[derive(Copy, Clone, Debug)]
    #[repr(C)]
    struct Used {
        flags: Le16,
        idx: Le16,
        used_elem_ring: [UsedElem; QUEUE_SIZE],
        avail_event: Le16,
    }
    // Safe as this only runs in test
    unsafe impl DataInit for Used {}
    impl Default for Used {
        fn default() -> Self {
            Used {
                flags: Le16::from(0u16),
                idx: Le16::from(0u16),
                used_elem_ring: [UsedElem::default(); QUEUE_SIZE],
                avail_event: Le16::from(0u16),
            }
        }
    }

    fn setup_vq(queue: &mut Queue, mem: &GuestMemory) {
        let desc = Desc {
            addr: Le64::from(BUFFER_OFFSET),
            len: Le32::from(BUFFER_LEN),
            flags: Le16::from(0u16),
            next: Le16::from(1u16),
        };
        let _ = mem.write_obj_at_addr(desc, GuestAddress(DESC_OFFSET));

        let avail = Avail::default();
        let _ = mem.write_obj_at_addr(avail, GuestAddress(AVAIL_OFFSET));

        let used = Used::default();
        let _ = mem.write_obj_at_addr(used, GuestAddress(USED_OFFSET));

        queue.desc_table = GuestAddress(DESC_OFFSET);
        queue.avail_ring = GuestAddress(AVAIL_OFFSET);
        queue.used_ring = GuestAddress(USED_OFFSET);
        queue.ack_features((1u64) << VIRTIO_RING_F_EVENT_IDX);
    }

    #[test]
    fn queue_event_id_guest_fast() {
        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            Arc::new(AtomicUsize::new(0)),
            IrqLevelEvent::new().unwrap(),
            None,
            10,
        );

        // Calculate the offset of used_event within the Avail structure.
        #[allow(deref_nullptr)]
        let used_event_offset: u64 =
            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device has
        // handled them, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so the device
        // should inject an interrupt.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to 0x100.
        let mut driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts and the device
        // hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits another u16::MAX - 0x100 requests and the device
        // has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver has only handled 0x100 interrupts, so the device
        // should inject an interrupt.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles all the interrupts and updates avail.used_event to u16::MAX.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts and the device
        // hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used wraps around to 0.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has handled all the previous interrupts, so the
        // device should inject an interrupt again.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles that interrupt and updates avail.used_event to 0.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts and the device
        // hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);
    }

    #[test]
    fn queue_event_id_guest_slow() {
        let mut queue = Queue::new(QUEUE_SIZE.try_into().unwrap());
        let memory_start_addr = GuestAddress(0x0);
        let mem = GuestMemory::new(&[(memory_start_addr, GUEST_MEMORY_SIZE)]).unwrap();
        setup_vq(&mut queue, &mem);

        let interrupt = Interrupt::new(
            Arc::new(AtomicUsize::new(0)),
            IrqLevelEvent::new().unwrap(),
            None,
            10,
        );

        // Calculate the offset of used_event within the Avail structure.
        #[allow(deref_nullptr)]
        let used_event_offset: u64 =
            unsafe { &(*(::std::ptr::null::<Avail>())).used_event as *const _ as u64 };
        let used_event_address = GuestAddress(AVAIL_OFFSET + used_event_offset);

        // Assume the driver submits 0x100 requests to the device and the device has
        // handled them, so self.next_used is increased to 0x100.
        let mut device_generate: Wrapping<u16> = Wrapping(0x100);
        for _ in 0..device_generate.0 {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }

        // At this moment the driver hasn't handled any interrupts yet, so the device
        // should inject an interrupt.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // The driver handles part of the interrupts and updates avail.used_event to 0x80.
        let mut driver_handled = Wrapping(0x80);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is incremented.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits another u16::MAX - 0x101 requests and the device
        // has handled all of them, so self.next_used is increased to u16::MAX.
        for _ in device_generate.0..u16::max_value() {
            queue.add_used(&mem, 0x0, BUFFER_LEN);
        }
        device_generate = Wrapping(u16::max_value());

        // At this moment the driver hasn't finished handling the last interrupt yet,
        // so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // The driver handles most of the interrupts and updates avail.used_event to
        // u16::MAX - 1.
        driver_handled = device_generate - Wrapping(1);
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used wraps around to 0.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has already finished the last injected interrupt
        // (at 0x100), and the device has serviced more requests, so a new interrupt
        // is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is incremented to 1.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver hasn't finished handling the last interrupt
        // (Wrapping(0)) yet, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // The driver handles all the remaining interrupts and wraps avail.used_event
        // around to 0x1.
        driver_handled = device_generate;
        let _ = mem.write_obj_at_addr(Le16::from(driver_handled.0), used_event_address);

        // At this moment the driver has handled all the interrupts and the device
        // hasn't generated more data, so no interrupt is needed.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), false);

        // Assume the driver submits one more request and the device has handled it,
        // so self.next_used is increased.
        queue.add_used(&mem, 0x0, BUFFER_LEN);
        device_generate += Wrapping(1);

        // At this moment the driver has finished all the previous interrupts, so the
        // device should inject an interrupt again.
        assert_eq!(queue.trigger_interrupt(&mem, &interrupt), true);
    }
}