// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering;
use std::sync::Arc;

use base::Event;
use sync::Mutex;

use super::INTERRUPT_STATUS_CONFIG_CHANGED;
use super::INTERRUPT_STATUS_USED_RING;
use super::VIRTIO_MSI_NO_VECTOR;
use crate::irq_event::IrqEdgeEvent;
use crate::irq_event::IrqLevelEvent;
use crate::pci::MsixConfig;

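/// Interface for signaling virtio interrupts to the guest.
///
/// # Example
///
/// An illustrative sketch only; `complete_request` and `queue_msix_vector` are hypothetical
/// names standing in for a device's own request handler and its queue's configured MSI-X
/// vector.
///
/// ```ignore
/// fn complete_request<I: SignalableInterrupt>(interrupt: &I, queue_msix_vector: u16) {
///     // ... place the completed descriptor in the used ring ...
///     // Then tell the driver the used queue has new entries.
///     interrupt.signal_used_queue(queue_msix_vector);
/// }
/// ```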
pub trait SignalableInterrupt: Clone {
    /// Writes to the irqfd to signal the VMM to deliver a virtual interrupt to the guest.
    fn signal(&self, vector: u16, interrupt_status_mask: u32);

    /// Notify the driver that buffers have been placed in the used queue.
    fn signal_used_queue(&self, vector: u16) {
        self.signal(vector, INTERRUPT_STATUS_USED_RING)
    }

    /// Notify the driver that the device configuration has changed.
    fn signal_config_changed(&self);

    /// Get the event used to signal that resampling is needed, if one exists.
    fn get_resample_evt(&self) -> Option<&Event>;

    /// Reads the status and writes to the interrupt event. Doesn't read the resample event;
    /// it assumes the resample has been requested.
    fn do_interrupt_resample(&self);
}

/// PCI transport state: a level-triggered INTx event plus the optional MSI-X configuration.
struct TransportPci {
    irq_evt_lvl: IrqLevelEvent,
    msix_config: Option<Arc<Mutex<MsixConfig>>>,
    config_msix_vector: u16,
}

/// The transport over which interrupts are delivered to the guest.
enum Transport {
    Pci { pci: TransportPci },
    Mmio { irq_evt_edge: IrqEdgeEvent },
}

struct InterruptInner {
    interrupt_status: AtomicUsize,
    transport: Transport,
    /// Set when the hypervisor updates `interrupt_status` asynchronously; see `signal()`.
    async_intr_status: bool,
}

#[derive(Clone)]
pub struct Interrupt {
    inner: Arc<InterruptInner>,
}

impl SignalableInterrupt for Interrupt {
    /// Virtqueue Interrupts From The Device
    ///
    /// If MSI-X is enabled in this device, the MSI-X interrupt is preferred.
    /// Writes to the irqfd to signal the VMM to deliver a virtual interrupt to the guest.
    fn signal(&self, vector: u16, interrupt_status_mask: u32) {
        // No need to set the ISR for MSI-X interrupts.
        if let Transport::Pci { pci } = &self.inner.as_ref().transport {
            if let Some(msix_config) = &pci.msix_config {
                let mut msix_config = msix_config.lock();
                if msix_config.enabled() {
                    if vector != VIRTIO_MSI_NO_VECTOR {
                        msix_config.trigger(vector);
                    }
                    return;
                }
            }
        }

        // Set the bit in the ISR and inject the interrupt if it was not already pending;
        // there is no need to inject another interrupt while the guest hasn't processed the
        // previous one. In hypervisors where interrupt_status is updated asynchronously,
        // inject the interrupt even if the previous interrupt appears to be already pending.
        if self
            .inner
            .as_ref()
            .interrupt_status
            .fetch_or(interrupt_status_mask as usize, Ordering::SeqCst)
            == 0
            || self.inner.as_ref().async_intr_status
        {
            // Write to the irqfd to inject a PCI INTx or MMIO interrupt.
            match &self.inner.as_ref().transport {
                Transport::Pci { pci } => pci.irq_evt_lvl.trigger().unwrap(),
                Transport::Mmio { irq_evt_edge } => irq_evt_edge.trigger().unwrap(),
            }
        }
    }

    fn signal_config_changed(&self) {
        let vector = match &self.inner.as_ref().transport {
            Transport::Pci { pci } => pci.config_msix_vector,
            _ => VIRTIO_MSI_NO_VECTOR,
        };
        self.signal(vector, INTERRUPT_STATUS_CONFIG_CHANGED)
    }

    fn get_resample_evt(&self) -> Option<&Event> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => Some(pci.irq_evt_lvl.get_resample()),
            _ => None,
        }
    }

    fn do_interrupt_resample(&self) {
        if self.inner.interrupt_status.load(Ordering::SeqCst) != 0 {
            match &self.inner.as_ref().transport {
                Transport::Pci { pci } => pci.irq_evt_lvl.trigger().unwrap(),
                _ => panic!("do_interrupt_resample() not supported"),
            }
        }
    }
}

impl Interrupt {
    /// Creates an `Interrupt` that signals the guest over the PCI transport, using MSI-X when
    /// it is enabled and the level-triggered INTx event otherwise.
    pub fn new(
        irq_evt_lvl: IrqLevelEvent,
        msix_config: Option<Arc<Mutex<MsixConfig>>>,
        config_msix_vector: u16,
    ) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                async_intr_status: false,
                transport: Transport::Pci {
                    pci: TransportPci {
                        irq_evt_lvl,
                        msix_config,
                        config_msix_vector,
                    },
                },
            }),
        }
    }

    /// Creates an `Interrupt` that signals the guest over the MMIO transport with an
    /// edge-triggered event.
    pub fn new_mmio(irq_evt_edge: IrqEdgeEvent, async_intr_status: bool) -> Interrupt {
        Interrupt {
            inner: Arc::new(InterruptInner {
                interrupt_status: AtomicUsize::new(0),
                transport: Transport::Mmio { irq_evt_edge },
                async_intr_status,
            }),
        }
    }

    /// Get a reference to the interrupt event.
    pub fn get_interrupt_evt(&self) -> &Event {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => pci.irq_evt_lvl.get_trigger(),
            Transport::Mmio { irq_evt_edge } => irq_evt_edge.get_trigger(),
        }
    }

    /// Handle interrupt resampling event, reading the value from the event and doing the resample.
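    ///
    /// # Example
    ///
    /// An illustrative sketch only; `wait_for_event` is a hypothetical helper standing in for
    /// whatever mechanism the device uses to wait until the resample event becomes readable.
    ///
    /// ```ignore
    /// if let Some(resample_evt) = interrupt.get_resample_evt() {
    ///     // Hypothetical: block until the irq chip signals the resample event.
    ///     wait_for_event(resample_evt);
    ///     // Clear the resample event and re-inject the interrupt if the ISR is still set.
    ///     interrupt.interrupt_resample();
    /// }
    /// ```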
    pub fn interrupt_resample(&self) {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => {
                pci.irq_evt_lvl.clear_resample();
                self.do_interrupt_resample();
            }
            _ => panic!("interrupt_resample() not supported"),
        }
    }

    /// Get a reference to the MSI-X configuration, if the PCI transport has one.
    pub fn get_msix_config(&self) -> &Option<Arc<Mutex<MsixConfig>>> {
        match &self.inner.as_ref().transport {
            Transport::Pci { pci } => &pci.msix_config,
            _ => &None,
        }
    }

    /// Reads the current value of the interrupt status.
    pub fn read_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.load(Ordering::SeqCst) as u8
    }

    /// Reads the current value of the interrupt status and resets it to 0.
    pub fn read_and_reset_interrupt_status(&self) -> u8 {
        self.inner.interrupt_status.swap(0, Ordering::SeqCst) as u8
    }

    /// Clear the bits set in `mask` in the interrupt status.
    pub fn clear_interrupt_status_bits(&self, mask: u8) {
        self.inner
            .interrupt_status
            .fetch_and(!(mask as usize), Ordering::SeqCst);
    }
}