// Copyright 2020 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Implementation of an Intel ICH10 Input/Output Advanced Programmable Interrupt Controller
// See https://www.intel.com/content/dam/doc/datasheet/io-controller-hub-10-family-datasheet.pdf
// for a specification.

use super::IrqEvent;
use crate::bus::BusAccessInfo;
use crate::BusDevice;
use base::{error, warn, Error, Event, Result, Tube, TubeError};
use hypervisor::{
    IoapicRedirectionTableEntry, IoapicState, MsiAddressMessage, MsiDataMessage, TriggerMode,
    MAX_IOAPIC_PINS, NUM_IOAPIC_PINS,
};
use remain::sorted;
use thiserror::Error;
use vm_control::{VmIrqRequest, VmIrqResponse};

// ICH10 I/O APIC version: 0x20
const IOAPIC_VERSION_ID: u32 = 0x00000020;
pub const IOAPIC_BASE_ADDRESS: u64 = 0xfec00000;
// The Intel manual does not specify this size, but KVM uses it.
pub const IOAPIC_MEM_LENGTH_BYTES: u64 = 0x100;

// Constants for IOAPIC direct register offset.
const IOAPIC_REG_ID: u8 = 0x00;
const IOAPIC_REG_VERSION: u8 = 0x01;
const IOAPIC_REG_ARBITRATION_ID: u8 = 0x02;

// Register offsets
const IOREGSEL_OFF: u8 = 0x0;
const IOREGSEL_DUMMY_UPPER_32_BITS_OFF: u8 = 0x4;
const IOWIN_OFF: u8 = 0x10;
const IOEOIR_OFF: u8 = 0x40;

const IOWIN_SCALE: u8 = 0x2;
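
// The redirection table is programmed indirectly: the guest first writes a register selector to
// IOREGSEL and then accesses the selected register through IOWIN. Each redirection table entry is
// 64 bits wide and is exposed as two 32-bit registers, hence IOWIN_SCALE. As an illustrative
// example, to write the low dword of redirection entry 1 a guest writes 0x12
// (IOWIN_OFF + 1 * IOWIN_SCALE) to IOREGSEL and then writes the dword to IOWIN.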

/// Given an IRQ and whether or not the selector should refer to the high bits, return a selector
/// suitable to use as an offset to read from/write to.
#[allow(dead_code)]
fn encode_selector_from_irq(irq: usize, is_high_bits: bool) -> u8 {
    (irq as u8) * IOWIN_SCALE + IOWIN_OFF + (is_high_bits as u8)
}

/// Given an offset that was read from/written to, return a tuple of the relevant IRQ and whether
/// the offset refers to the high bits of that register.
fn decode_irq_from_selector(selector: u8) -> (usize, bool) {
    (
        ((selector - IOWIN_OFF) / IOWIN_SCALE) as usize,
        selector & 1 != 0,
    )
}
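
// A quick worked example of the selector encoding (illustrative values only):
//   encode_selector_from_irq(8, false) == 0x20  (low dword of redirection entry 8)
//   encode_selector_from_irq(8, true)  == 0x21  (high dword of redirection entry 8)
//   decode_irq_from_selector(0x21)     == (8, true)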

// The RTC needs special treatment to work properly for Windows (or other OSs that use tick
// stuffing). In order to avoid time drift, we need to guarantee that the correct number of RTC
// interrupts are injected into the guest. This hack essentially treats RTC interrupts as level
// triggered, which allows the IOAPIC to be responsible for interrupt coalescing and allows the
// IOAPIC to pass back whether or not the interrupt was coalesced to the CMOS (which allows the
// CMOS to perform tick stuffing). This deviates from the IOAPIC spec in ways very similar to (but
// not exactly the same as) KVM's IOAPIC.
const RTC_IRQ: usize = 0x8;

pub struct Ioapic {
    /// Number of supported IO-APIC inputs / redirection entries.
    num_pins: usize,
    /// ioregsel register. Used for selecting which entry of the redirect table to read/write.
    ioregsel: u8,
    /// ioapicid register. Bits 24 - 27 contain the APIC ID for this device.
    ioapicid: u32,
    /// Remote IRR for Edge Triggered Real Time Clock interrupts, which allows the CMOS to know when
    /// one of its interrupts is being coalesced.
    rtc_remote_irr: bool,
    /// Outgoing irq events that are used to inject MSI interrupts.
    out_events: Vec<Option<IrqEvent>>,
    /// Events that should be triggered on an EOI. The outer Vec is indexed by GSI, and the inner
    /// Vec is an unordered list of registered resample events for the GSI.
    resample_events: Vec<Vec<Event>>,
    /// Redirection settings for each irq line.
    redirect_table: Vec<IoapicRedirectionTableEntry>,
    /// Interrupt activation state.
    interrupt_level: Vec<bool>,
    /// Tube used to route MSI irqs.
    irq_tube: Tube,
}

impl BusDevice for Ioapic {
    fn debug_label(&self) -> String {
        "userspace IOAPIC".to_string()
    }

    fn read(&mut self, info: BusAccessInfo, data: &mut [u8]) {
        if data.len() > 8 || data.is_empty() {
            warn!("IOAPIC: Bad read size: {}", data.len());
            return;
        }
        if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
            warn!("IOAPIC: Bad read from {}", info);
        }
        let out = match info.offset as u8 {
            IOREGSEL_OFF => self.ioregsel.into(),
            IOREGSEL_DUMMY_UPPER_32_BITS_OFF => 0,
            IOWIN_OFF => self.ioapic_read(),
            IOEOIR_OFF => 0,
            _ => {
                warn!("IOAPIC: Bad read from {}", info);
                return;
            }
        };
        let out_arr = out.to_ne_bytes();
        for i in 0..4 {
            if i < data.len() {
                data[i] = out_arr[i];
            }
        }
    }

    fn write(&mut self, info: BusAccessInfo, data: &[u8]) {
        if data.len() > 8 || data.is_empty() {
            warn!("IOAPIC: Bad write size: {}", data.len());
            return;
        }
        if info.offset >= IOAPIC_MEM_LENGTH_BYTES {
            warn!("IOAPIC: Bad write to {}", info);
        }
        match info.offset as u8 {
            IOREGSEL_OFF => self.ioregsel = data[0],
            IOREGSEL_DUMMY_UPPER_32_BITS_OFF => {} // Ignored.
            IOWIN_OFF => {
                if data.len() != 4 {
                    warn!("IOAPIC: Bad write size for iowin: {}", data.len());
                    return;
                }
                let data_arr = [data[0], data[1], data[2], data[3]];
                let val = u32::from_ne_bytes(data_arr);
                self.ioapic_write(val);
            }
            IOEOIR_OFF => self.end_of_interrupt(data[0]),
            _ => {
                warn!("IOAPIC: Bad write to {}", info);
            }
        }
    }
}

impl Ioapic {
    pub fn new(irq_tube: Tube, num_pins: usize) -> Result<Ioapic> {
        let num_pins = num_pins.max(NUM_IOAPIC_PINS).min(MAX_IOAPIC_PINS);
        let mut entry = IoapicRedirectionTableEntry::new();
        entry.set_interrupt_mask(true);
        Ok(Ioapic {
            num_pins,
            ioregsel: 0,
            ioapicid: 0,
            rtc_remote_irr: false,
            out_events: (0..num_pins).map(|_| None).collect(),
            resample_events: Vec::new(),
            redirect_table: (0..num_pins).map(|_| entry).collect(),
            interrupt_level: (0..num_pins).map(|_| false).collect(),
            irq_tube,
        })
    }

    pub fn init_direct_gsi<F>(&mut self, register_irqfd: F) -> Result<()>
    where
        F: Fn(u32, &Event) -> Result<()>,
    {
        for (gsi, out_event) in self.out_events.iter_mut().enumerate() {
            let event = Event::new()?;
            register_irqfd(gsi as u32, &event)?;
            *out_event = Some(IrqEvent {
                gsi: gsi as u32,
                event,
                resample_event: None,
            });
        }
        Ok(())
    }

    pub fn get_ioapic_state(&self) -> IoapicState {
        // Convert the vector of the first NUM_IOAPIC_PINS interrupt levels into a u32 bitmap.
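        // For example (a purely illustrative value): if only pins 0 and 2 are asserted, the
        // reversed fold below produces 0b101 == 5, i.e. pin N maps to bit N of the bitmap.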
        let level_bitmap = self
            .interrupt_level
            .iter()
            .take(NUM_IOAPIC_PINS)
            .rev()
            .fold(0, |acc, &l| acc * 2 + l as u32);
        let mut state = IoapicState {
            base_address: IOAPIC_BASE_ADDRESS,
            ioregsel: self.ioregsel,
            ioapicid: self.ioapicid,
            current_interrupt_level_bitmap: level_bitmap,
            ..Default::default()
        };
        for (dst, src) in state
            .redirect_table
            .iter_mut()
            .zip(self.redirect_table.iter())
        {
            *dst = *src;
        }
        state
    }

    pub fn set_ioapic_state(&mut self, state: &IoapicState) {
        self.ioregsel = state.ioregsel;
        self.ioapicid = state.ioapicid & 0x0f00_0000;
        for (src, dst) in state
            .redirect_table
            .iter()
            .zip(self.redirect_table.iter_mut())
        {
            *dst = *src;
        }
        for (i, level) in self
            .interrupt_level
            .iter_mut()
            .take(NUM_IOAPIC_PINS)
            .enumerate()
        {
            *level = state.current_interrupt_level_bitmap & (1 << i) != 0;
        }
    }

    pub fn register_resample_events(&mut self, resample_events: Vec<Vec<Event>>) {
        self.resample_events = resample_events;
    }

    // The ioapic must be informed about EOIs in order to avoid sending multiple interrupts of the
    // same type at the same time.
    pub fn end_of_interrupt(&mut self, vector: u8) {
        if self.redirect_table[RTC_IRQ].get_vector() == vector && self.rtc_remote_irr {
            // Specifically clear RTC IRQ field
            self.rtc_remote_irr = false;
        }

        for i in 0..self.num_pins {
            if self.redirect_table[i].get_vector() == vector
                && self.redirect_table[i].get_trigger_mode() == TriggerMode::Level
            {
                if self
                    .resample_events
                    .get(i)
                    .map_or(false, |events| !events.is_empty())
                {
                    self.service_irq(i, false);
                }

                if let Some(resample_events) = self.resample_events.get(i) {
                    for resample_evt in resample_events {
                        resample_evt.write(1).unwrap();
                    }
                }
                self.redirect_table[i].set_remote_irr(false);
            }
            // There is an inherent race condition in hardware if the OS is finished processing an
            // interrupt and a new interrupt is delivered between issuing an EOI and the EOI being
            // completed.  When that happens the ioapic is supposed to re-inject the interrupt.
            if self.interrupt_level[i] {
                self.service_irq(i, true);
            }
        }
    }
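
    /// Asserts (`level == true`) or de-asserts (`level == false`) an IRQ line. A de-assert only
    /// clears the tracked line state. An assert injects the interrupt by writing the pin's
    /// outgoing event and returns whether the injection happened; it is suppressed (returns
    /// false) for a masked entry, a repeated edge-triggered assert, or a coalesced level/RTC
    /// interrupt.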
    pub fn service_irq(&mut self, irq: usize, level: bool) -> bool {
        let entry = &mut self.redirect_table[irq];

        // De-assert the interrupt.
        if !level {
            self.interrupt_level[irq] = false;
            return true;
        }

        // If it's an edge-triggered interrupt that's already high we ignore it.
        if entry.get_trigger_mode() == TriggerMode::Edge && self.interrupt_level[irq] {
            return false;
        }

        self.interrupt_level[irq] = true;

        // Interrupts are masked, so don't inject.
        if entry.get_interrupt_mask() {
            return false;
        }

        // Level-triggered and remote irr is already active, so we don't inject a new interrupt.
        // (Coalesce with the prior one(s)).
        if entry.get_trigger_mode() == TriggerMode::Level && entry.get_remote_irr() {
            return false;
        }

        // Coalesce RTC interrupt to make tick stuffing work.
        if irq == RTC_IRQ && self.rtc_remote_irr {
            return false;
        }

        let injected = match self.out_events.get(irq) {
            Some(Some(evt)) => evt.event.write(1).is_ok(),
            _ => false,
        };

        if entry.get_trigger_mode() == TriggerMode::Level && level && injected {
            entry.set_remote_irr(true);
        } else if irq == RTC_IRQ && injected {
            self.rtc_remote_irr = true;
        }

        injected
    }

    fn ioapic_write(&mut self, val: u32) {
        match self.ioregsel {
            IOAPIC_REG_VERSION => { /* read-only register */ }
            IOAPIC_REG_ID => self.ioapicid = val & 0x0f00_0000,
            IOAPIC_REG_ARBITRATION_ID => { /* read-only register */ }
            _ => {
                if self.ioregsel < IOWIN_OFF {
                    // Invalid write; ignore.
                    return;
                }
                let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
                if index >= self.num_pins {
                    // Invalid write; ignore.
                    return;
                }

                let entry = &mut self.redirect_table[index];
                if is_high_bits {
                    entry.set(32, 32, val.into());
                } else {
                    let before = *entry;
                    entry.set(0, 32, val.into());

                    // Respect R/O bits.
                    entry.set_delivery_status(before.get_delivery_status());
                    entry.set_remote_irr(before.get_remote_irr());

                    // Clear remote_irr when switching to edge_triggered.
                    if entry.get_trigger_mode() == TriggerMode::Edge {
                        entry.set_remote_irr(false);
                    }

                    // NOTE: on pre-4.0 kernels, there's a race we would need to work around.
                    // "KVM: x86: ioapic: Fix level-triggered EOI and IOAPIC reconfigure race"
                    // is the fix for this.
                }

                if self.redirect_table[index].get_trigger_mode() == TriggerMode::Level
                    && self.interrupt_level[index]
                    && !self.redirect_table[index].get_interrupt_mask()
                {
                    self.service_irq(index, true);
                }

                let mut address = MsiAddressMessage::new();
                let mut data = MsiDataMessage::new();
                let entry = &self.redirect_table[index];
                address.set_destination_mode(entry.get_dest_mode());
                address.set_destination_id(entry.get_dest_id());
                address.set_always_0xfee(0xfee);
                data.set_vector(entry.get_vector());
                data.set_delivery_mode(entry.get_delivery_mode());
                data.set_trigger(entry.get_trigger_mode());

                let msi_address = address.get(0, 32);
                let msi_data = data.get(0, 32);
                if let Err(e) = self.setup_msi(index, msi_address, msi_data as u32) {
                    error!("IOAPIC failed to set up MSI for index {}: {}", index, e);
                }
            }
        }
    }

    fn setup_msi(
        &mut self,
        index: usize,
        msi_address: u64,
        msi_data: u32,
    ) -> std::result::Result<(), IoapicError> {
        if msi_data == 0 {
            // During boot, Linux first configures all ioapic pins with msi_data == 0; the routes
            // aren't yet assigned to devices and aren't usable.  We skip MSI setup if msi_data is
            // 0.
            return Ok(());
        }

        // Allocate a GSI and event for the outgoing route, if we haven't already done it.
        // The event will be used on the "outgoing" end of the ioapic to send an interrupt to the
        // apics: when an incoming ioapic irq line gets signalled, the ioapic writes to the
        // corresponding outgoing event. The GSI number is used to update the routing info (MSI
        // data and addr) for the event. The GSI and event are allocated only once for each ioapic
        // irq line, when the guest first sets up the ioapic with a valid route. If the guest
        // later reconfigures an ioapic irq line, the same GSI and event are reused, and we change
        // the GSI's route to the new MSI data+addr destination.
        let gsi = if let Some(evt) = &self.out_events[index] {
            evt.gsi
        } else {
            let event = Event::new().map_err(IoapicError::CreateEvent)?;
            let request = VmIrqRequest::AllocateOneMsi {
                irqfd: event,
                device_id: self.device_id(),
                queue_id: index,
                device_name: self.debug_label(),
            };
            self.irq_tube
                .send(&request)
                .map_err(IoapicError::AllocateOneMsiSend)?;
            match self
                .irq_tube
                .recv()
                .map_err(IoapicError::AllocateOneMsiRecv)?
            {
                VmIrqResponse::AllocateOneMsi { gsi, .. } => {
                    self.out_events[index] = Some(IrqEvent {
                        gsi,
                        event: match request {
                            VmIrqRequest::AllocateOneMsi { irqfd, .. } => irqfd,
                            _ => unreachable!(),
                        },
                        resample_event: None,
                    });
                    gsi
                }
                VmIrqResponse::Err(e) => return Err(IoapicError::AllocateOneMsi(e)),
                _ => unreachable!(),
            }
        };

        // Set the MSI route for the GSI.  This controls which apic(s) get the interrupt when the
        // ioapic's outgoing event is written, and various attributes of how the interrupt is
        // delivered.
        let request = VmIrqRequest::AddMsiRoute {
            gsi,
            msi_address,
            msi_data,
        };
        self.irq_tube
            .send(&request)
            .map_err(IoapicError::AddMsiRouteSend)?;
        if let VmIrqResponse::Err(e) = self.irq_tube.recv().map_err(IoapicError::AddMsiRouteRecv)? {
            return Err(IoapicError::AddMsiRoute(e));
        }
        Ok(())
    }

    fn ioapic_read(&mut self) -> u32 {
        match self.ioregsel {
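            // Bits 23:16 of the version register hold the index of the highest redirection table
            // entry (num_pins - 1) and the low byte holds the version. For example (illustrative
            // only), a 24-pin IOAPIC reads back 0x0017_0020 here.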
            IOAPIC_REG_VERSION => ((self.num_pins - 1) as u32) << 16 | IOAPIC_VERSION_ID,
            IOAPIC_REG_ID | IOAPIC_REG_ARBITRATION_ID => self.ioapicid,
            _ => {
                if self.ioregsel < IOWIN_OFF {
                    // Invalid read; ignore and return 0.
                    0
                } else {
                    let (index, is_high_bits) = decode_irq_from_selector(self.ioregsel);
                    if index < self.num_pins {
                        let offset = if is_high_bits { 32 } else { 0 };
                        self.redirect_table[index].get(offset, 32) as u32
                    } else {
                        !0 // Invalid index - return all 1s
                    }
                }
            }
        }
    }
}

#[sorted]
#[derive(Error, Debug)]
enum IoapicError {
    #[error("AddMsiRoute failed: {0}")]
    AddMsiRoute(Error),
    #[error("failed to receive AddMsiRoute response: {0}")]
    AddMsiRouteRecv(TubeError),
    #[error("failed to send AddMsiRoute request: {0}")]
    AddMsiRouteSend(TubeError),
    #[error("AllocateOneMsi failed: {0}")]
    AllocateOneMsi(Error),
    #[error("failed to receive AllocateOneMsi response: {0}")]
    AllocateOneMsiRecv(TubeError),
    #[error("failed to send AllocateOneMsi request: {0}")]
    AllocateOneMsiSend(TubeError),
    #[error("failed to create event object: {0}")]
    CreateEvent(Error),
}

#[cfg(test)]
mod tests {
    use super::*;
    use hypervisor::{DeliveryMode, DeliveryStatus, DestinationMode};

    const DEFAULT_VECTOR: u8 = 0x3a;
    const DEFAULT_DESTINATION_ID: u8 = 0x5f;
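    // Note on the two constants above: set_up_redirection_table_entry() programs
    // DEFAULT_DESTINATION_ID as the entry's vector and DEFAULT_VECTOR as its destination id, so
    // the EOI calls in these tests are issued with DEFAULT_DESTINATION_ID.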

    fn new() -> Ioapic {
        let (_, irq_tube) = Tube::pair().unwrap();
        Ioapic::new(irq_tube, NUM_IOAPIC_PINS).unwrap()
    }

    fn ioapic_bus_address(offset: u8) -> BusAccessInfo {
        let offset = offset as u64;
        BusAccessInfo {
            offset,
            address: IOAPIC_BASE_ADDRESS + offset,
            id: 0,
        }
    }

    fn set_up(trigger: TriggerMode) -> (Ioapic, usize) {
        let irq = NUM_IOAPIC_PINS - 1;
        let ioapic = set_up_with_irq(irq, trigger);
        (ioapic, irq)
    }

    fn set_up_with_irq(irq: usize, trigger: TriggerMode) -> Ioapic {
        let mut ioapic = self::new();
        set_up_redirection_table_entry(&mut ioapic, irq, trigger);
        ioapic.out_events[irq] = Some(IrqEvent {
            gsi: NUM_IOAPIC_PINS as u32,
            event: Event::new().unwrap(),
            resample_event: None,
        });
        ioapic
    }

    fn read_reg(ioapic: &mut Ioapic, selector: u8) -> u32 {
        let mut data = [0; 4];
        ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
        ioapic.read(ioapic_bus_address(IOWIN_OFF), &mut data);
        u32::from_ne_bytes(data)
    }

    fn write_reg(ioapic: &mut Ioapic, selector: u8, value: u32) {
        ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &[selector]);
        ioapic.write(ioapic_bus_address(IOWIN_OFF), &value.to_ne_bytes());
    }

    fn read_entry(ioapic: &mut Ioapic, irq: usize) -> IoapicRedirectionTableEntry {
        let mut entry = IoapicRedirectionTableEntry::new();
        entry.set(
            0,
            32,
            read_reg(ioapic, encode_selector_from_irq(irq, false)).into(),
        );
        entry.set(
            32,
            32,
            read_reg(ioapic, encode_selector_from_irq(irq, true)).into(),
        );
        entry
    }

    fn write_entry(ioapic: &mut Ioapic, irq: usize, entry: IoapicRedirectionTableEntry) {
        write_reg(
            ioapic,
            encode_selector_from_irq(irq, false),
            entry.get(0, 32) as u32,
        );
        write_reg(
            ioapic,
            encode_selector_from_irq(irq, true),
            entry.get(32, 32) as u32,
        );
    }

    fn set_up_redirection_table_entry(ioapic: &mut Ioapic, irq: usize, trigger_mode: TriggerMode) {
        let mut entry = IoapicRedirectionTableEntry::new();
        entry.set_vector(DEFAULT_DESTINATION_ID);
        entry.set_delivery_mode(DeliveryMode::Startup);
        entry.set_delivery_status(DeliveryStatus::Pending);
        entry.set_dest_id(DEFAULT_VECTOR);
        entry.set_trigger_mode(trigger_mode);
        write_entry(ioapic, irq, entry);
    }

    fn set_mask(ioapic: &mut Ioapic, irq: usize, mask: bool) {
        let mut entry = read_entry(ioapic, irq);
        entry.set_interrupt_mask(mask);
        write_entry(ioapic, irq, entry);
    }

    #[test]
    fn write_read_ioregsel() {
        let mut ioapic = self::new();
        let data_write = [0x0f, 0xf0, 0x01, 0xff];
        let mut data_read = [0; 4];

        for i in 0..data_write.len() {
            ioapic.write(ioapic_bus_address(IOREGSEL_OFF), &data_write[i..i + 1]);
            ioapic.read(ioapic_bus_address(IOREGSEL_OFF), &mut data_read[i..i + 1]);
            assert_eq!(data_write[i], data_read[i]);
        }
    }

    // Verify that the version register is actually read-only.
    #[test]
    fn write_read_ioapic_reg_version() {
        let mut ioapic = self::new();
        let before = read_reg(&mut ioapic, IOAPIC_REG_VERSION);
        let data_write = !before;

        write_reg(&mut ioapic, IOAPIC_REG_VERSION, data_write);
        assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_VERSION), before);
    }

    // Verify that only bits 27:24 of the IOAPICID are readable/writable.
    #[test]
    fn write_read_ioapic_reg_id() {
        let mut ioapic = self::new();

        write_reg(&mut ioapic, IOAPIC_REG_ID, 0x1f3e5d7c);
        assert_eq!(read_reg(&mut ioapic, IOAPIC_REG_ID), 0x0f000000);
    }

    // Write to read-only register IOAPICARB.
    #[test]
    fn write_read_ioapic_arbitration_id() {
        let mut ioapic = self::new();

        let data_write_id = 0x1f3e5d7c;
        let expected_result = 0x0f000000;

        // Write to IOAPICID.  This should also change IOAPICARB.
        write_reg(&mut ioapic, IOAPIC_REG_ID, data_write_id);

        // Read IOAPICARB
        assert_eq!(
            read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
            expected_result
        );

        // Try to write to IOAPICARB and verify unchanged result.
        write_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID, !data_write_id);
        assert_eq!(
            read_reg(&mut ioapic, IOAPIC_REG_ARBITRATION_ID),
            expected_result
        );
    }

    #[test]
    #[should_panic(expected = "index out of bounds: the len is 24 but the index is 24")]
    fn service_invalid_irq() {
        let mut ioapic = self::new();
        ioapic.service_irq(NUM_IOAPIC_PINS, false);
    }

    // Test a level triggered IRQ interrupt.
    #[test]
    fn service_level_irq() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // TODO(mutexlox): Check that interrupt is fired once.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }

    #[test]
    fn service_multiple_level_irqs() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);
        // TODO(mutexlox): Check that interrupt is fired twice.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
        ioapic.service_irq(irq, true);
    }

    // Test multiple level interrupts without an EOI and verify that only one interrupt is
    // delivered.
    #[test]
    fn coalesce_multiple_level_irqs() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // TODO(mutexlox): Test that only one interrupt is delivered.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.service_irq(irq, true);
    }

    // Test multiple RTC interrupts without an EOI and verify that only one interrupt is delivered.
    #[test]
    fn coalesce_multiple_rtc_irqs() {
        let irq = RTC_IRQ;
        let mut ioapic = set_up_with_irq(irq, TriggerMode::Edge);

        // TODO(mutexlox): Verify that only one IRQ is delivered.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.service_irq(irq, true);
    }

    // Test that a coalesced level interrupt is re-raised if the guest issues an EndOfInterrupt
    // while the line is still asserted.
    #[test]
    fn reinject_level_interrupt() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // TODO(mutexlox): Verify that only one IRQ is delivered.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
        ioapic.service_irq(irq, true);

        // TODO(mutexlox): Verify that this last interrupt occurs as a result of the EOI, rather
        // than in response to the last service_irq.
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
    }

    #[test]
    fn service_edge_triggered_irq() {
        let (mut ioapic, irq) = set_up(TriggerMode::Edge);

        // TODO(mutexlox): Verify that one interrupt is delivered.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, true); // Repeated asserts before a deassert should be ignored.
        ioapic.service_irq(irq, false);
    }

    // Verify that the state of an edge-triggered interrupt is properly tracked even when the
    // interrupt is disabled.
    #[test]
    fn edge_trigger_unmask_test() {
        let (mut ioapic, irq) = set_up(TriggerMode::Edge);

        // TODO(mutexlox): Expect an IRQ.

        ioapic.service_irq(irq, true);

        set_mask(&mut ioapic, irq, true);
        ioapic.service_irq(irq, false);

        // No interrupt triggered while masked.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);

        set_mask(&mut ioapic, irq, false);

        // TODO(mutexlox): Expect another interrupt.
        // Interrupt triggered while unmasked, even though when it was masked the level was high.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }

    // Verify that a level-triggered interrupt that is triggered while masked will fire once the
    // interrupt is unmasked.
    #[test]
    fn level_trigger_unmask_test() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        set_mask(&mut ioapic, irq, true);
        ioapic.service_irq(irq, true);

        // TODO(mutexlox): expect an interrupt after this.
        set_mask(&mut ioapic, irq, false);
    }

    // Verify that multiple asserts before a deassert are ignored even if there's an EOI between
    // them.
    #[test]
    fn end_of_interrupt_edge_triggered_irq() {
        let (mut ioapic, irq) = set_up(TriggerMode::Edge);

        // TODO(mutexlox): Expect 1 interrupt.
        ioapic.service_irq(irq, true);
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);
        // Repeated asserts before a de-assert should be ignored.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }

    // Send multiple edge-triggered interrupts in a row.
    #[test]
    fn service_multiple_edge_irqs() {
        let (mut ioapic, irq) = set_up(TriggerMode::Edge);

        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that an interrupt occurs here.
        ioapic.service_irq(irq, false);

        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that an interrupt occurs here.
        ioapic.service_irq(irq, false);
    }

    // Test an interrupt line with negative polarity.
    #[test]
    fn service_negative_polarity_irq() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_polarity(1);
        write_entry(&mut ioapic, irq, entry);

        // TODO(mutexlox): Expect an interrupt to fire.
        ioapic.service_irq(irq, false);
    }

    // Ensure that remote IRR can't be edited via mmio.
    #[test]
    fn remote_irr_read_only() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        ioapic.redirect_table[irq].set_remote_irr(true);

        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_remote_irr(false);
        write_entry(&mut ioapic, irq, entry);

        assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), true);
    }

    #[test]
    fn delivery_status_read_only() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        ioapic.redirect_table[irq].set_delivery_status(DeliveryStatus::Pending);

        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_delivery_status(DeliveryStatus::Idle);
        write_entry(&mut ioapic, irq, entry);

        assert_eq!(
            read_entry(&mut ioapic, irq).get_delivery_status(),
            DeliveryStatus::Pending
        );
    }

    #[test]
    fn level_to_edge_transition_clears_remote_irr() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        ioapic.redirect_table[irq].set_remote_irr(true);

        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_trigger_mode(TriggerMode::Edge);
        write_entry(&mut ioapic, irq, entry);

        assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), false);
    }

    #[test]
    fn masking_preserves_remote_irr() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        ioapic.redirect_table[irq].set_remote_irr(true);

        set_mask(&mut ioapic, irq, true);
        set_mask(&mut ioapic, irq, false);

        assert_eq!(read_entry(&mut ioapic, irq).get_remote_irr(), true);
    }

    // Test reconfiguration racing with EOIs.
    #[test]
    fn reconfiguration_race() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // Fire one level-triggered interrupt.
        // TODO(mutexlox): Check that it fires.
        ioapic.service_irq(irq, true);

        // Read the redirection table entry before the EOI...
        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_trigger_mode(TriggerMode::Edge);

        ioapic.service_irq(irq, false);
        ioapic.end_of_interrupt(DEFAULT_DESTINATION_ID);

        // ... and write back that (modified) value.
        write_entry(&mut ioapic, irq, entry);

        // Fire one *edge* triggered interrupt
        // TODO(mutexlox): Assert that the interrupt fires once.
        ioapic.service_irq(irq, true);
        ioapic.service_irq(irq, false);
    }

    // Ensure that swapping to edge triggered and back clears the remote irr bit.
    #[test]
    fn implicit_eoi() {
        let (mut ioapic, irq) = set_up(TriggerMode::Level);

        // Fire one level-triggered interrupt.
        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that one interrupt was fired.
        ioapic.service_irq(irq, false);

        // Do an implicit EOI by cycling between edge and level triggered.
        let mut entry = read_entry(&mut ioapic, irq);
        entry.set_trigger_mode(TriggerMode::Edge);
        write_entry(&mut ioapic, irq, entry);
        entry.set_trigger_mode(TriggerMode::Level);
        write_entry(&mut ioapic, irq, entry);

        // Fire one level-triggered interrupt.
        ioapic.service_irq(irq, true);
        // TODO(mutexlox): Verify that one interrupt fires.
        ioapic.service_irq(irq, false);
    }

    #[test]
    fn set_redirection_entry_by_bits() {
        let mut entry = IoapicRedirectionTableEntry::new();
        //                                                          destination_mode
        //                                                         polarity |
        //                                                  trigger_mode |  |
        //                                                             | |  |
        // 0011 1010 0000 0000 0000 0000 0000 0000 0000 0000 0000 0000 1001 0110 0101 1111
        // |_______| |______________________________________________||  | |  |_| |_______|
        //  dest_id                      reserved                    |  | |   |    vector
        //                                               interrupt_mask | |   |
        //                                                     remote_irr |   |
        //                                                    delivery_status |
        //                                                              delivery_mode
        entry.set(0, 64, 0x3a0000000000965f);
        assert_eq!(entry.get_vector(), 0x5f);
        assert_eq!(entry.get_delivery_mode(), DeliveryMode::Startup);
        assert_eq!(entry.get_dest_mode(), DestinationMode::Physical);
        assert_eq!(entry.get_delivery_status(), DeliveryStatus::Pending);
        assert_eq!(entry.get_polarity(), 0);
        assert_eq!(entry.get_remote_irr(), false);
        assert_eq!(entry.get_trigger_mode(), TriggerMode::Level);
        assert_eq!(entry.get_interrupt_mask(), false);
        assert_eq!(entry.get_reserved(), 0);
        assert_eq!(entry.get_dest_id(), 0x3a);

        let (mut ioapic, irq) = set_up(TriggerMode::Edge);
        write_entry(&mut ioapic, irq, entry);
        assert_eq!(
            read_entry(&mut ioapic, irq).get_trigger_mode(),
            TriggerMode::Level
        );

        // TODO(mutexlox): Verify that this actually fires an interrupt.
        ioapic.service_irq(irq, true);
    }
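
    // A small illustrative example (not part of the original suite): the selector helpers at the
    // top of this file should round-trip for every pin.
    #[test]
    fn selector_encode_decode_roundtrip() {
        for irq in 0..NUM_IOAPIC_PINS {
            for &is_high in &[false, true] {
                let selector = encode_selector_from_irq(irq, is_high);
                assert_eq!(decode_irq_from_selector(selector), (irq, is_high));
            }
        }
    }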
}