• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2019 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 use base::{error, AsRawDescriptor, Error as SysError, Event, RawDescriptor, Tube, TubeError};
6 use bit_field::*;
7 use data_model::DataInit;
8 use remain::sorted;
9 use std::convert::TryInto;
10 use thiserror::Error;
11 use vm_control::{VmIrqRequest, VmIrqResponse};
12 
13 use crate::pci::{PciCapability, PciCapabilityID};
14 
// Spec-maximum number of MSI-X vectors a single function may expose.
const MAX_MSIX_VECTORS_PER_DEVICE: u16 = 2048;
/// Size in bytes of one MSI-X table entry (4 DWORDs: addr lo/hi, data, ctrl).
pub const MSIX_TABLE_ENTRIES_MODULO: u64 = 16;
/// Size in bytes of one Pending Bit Array entry (a QWORD of pending bits).
pub const MSIX_PBA_ENTRIES_MODULO: u64 = 8;
/// Number of pending bits packed into a single PBA entry.
pub const BITS_PER_PBA_ENTRY: usize = 64;
// Message Control word: bit 14 is Function Mask, bit 15 is MSI-X Enable.
const FUNCTION_MASK_BIT: u16 = 0x4000;
const MSIX_ENABLE_BIT: u16 = 0x8000;
// Vector Control DWORD: bit 0 is the per-vector Mask bit.
const MSIX_TABLE_ENTRY_MASK_BIT: u32 = 0x1;
22 
// In-memory image of a single MSI-X table entry, laid out as the guest sees
// it: Message Address (low/high), Message Data, and Vector Control.
#[derive(Clone, Default)]
struct MsixTableEntry {
    // Low 32 bits of the Message Address.
    msg_addr_lo: u32,
    // High 32 bits of the Message Address.
    msg_addr_hi: u32,
    // Message Data written during the MSI transaction.
    msg_data: u32,
    // Vector Control; only bit 0 (Mask) is meaningful here.
    vector_ctl: u32,
}
30 
31 impl MsixTableEntry {
masked(&self) -> bool32     fn masked(&self) -> bool {
33         self.vector_ctl & MSIX_TABLE_ENTRY_MASK_BIT == MSIX_TABLE_ENTRY_MASK_BIT
34     }
35 }
36 
// Pairing of the irqfd used to trigger a vector with the guest GSI the VM
// routed it to.
struct IrqfdGsi {
    irqfd: Event,
    gsi: u32,
}
41 
/// Wrapper over MSI-X Capability Structure and MSI-X Tables
pub struct MsixConfig {
    // Guest-visible MSI-X table; one entry per vector.
    table_entries: Vec<MsixTableEntry>,
    // Pending Bit Array; BITS_PER_PBA_ENTRY pending bits per element.
    pba_entries: Vec<u64>,
    // Allocated irqfd/GSI pair per vector; None until the vector is first
    // enabled via msix_enable_one().
    irq_vec: Vec<Option<IrqfdGsi>>,
    // Function Mask bit of the Message Control word.
    masked: bool,
    // MSI-X Enable bit of the Message Control word.
    enabled: bool,
    // Tube used to ask the VM to allocate MSIs and add routes.
    msi_device_socket: Tube,
    // Number of vectors (== table_entries.len()).
    msix_num: u16,
    // PCI device id forwarded with each AllocateOneMsi request.
    pci_id: u32,
    // Device name forwarded with each AllocateOneMsi request.
    device_name: String,
}
54 
// Errors produced while allocating or routing MSI-X vectors over the VM
// control tube. Variants are kept alphabetical, enforced by #[sorted].
#[sorted]
#[derive(Error, Debug)]
enum MsixError {
    // The VM rejected the AddMsiRoute request.
    #[error("AddMsiRoute failed: {0}")]
    AddMsiRoute(SysError),
    // Tube receive failed while waiting for the AddMsiRoute reply.
    #[error("failed to receive AddMsiRoute response: {0}")]
    AddMsiRouteRecv(TubeError),
    // Tube send of the AddMsiRoute request failed.
    #[error("failed to send AddMsiRoute request: {0}")]
    AddMsiRouteSend(TubeError),
    // Event creation failed or the VM rejected the AllocateOneMsi request.
    #[error("AllocateOneMsi failed: {0}")]
    AllocateOneMsi(SysError),
    // Tube receive failed while waiting for the AllocateOneMsi reply.
    #[error("failed to receive AllocateOneMsi response: {0}")]
    AllocateOneMsiRecv(TubeError),
    // Tube send of the AllocateOneMsi request failed.
    #[error("failed to send AllocateOneMsi request: {0}")]
    AllocateOneMsiSend(TubeError),
}
71 
// Local result alias for the MSI-X setup paths.
type MsixResult<T> = std::result::Result<T, MsixError>;

/// Outcome of a write to the MSI-X capability or table, telling the caller
/// how mask state changed.
pub enum MsixStatus {
    /// The global Function Mask changed; every vector is affected.
    Changed,
    /// A single table entry's mask state changed.
    EntryChanged(usize),
    /// No mask state changed.
    NothingToDo,
}
79 
80 impl MsixConfig {
new(msix_vectors: u16, vm_socket: Tube, pci_id: u32, device_name: String) -> Self81     pub fn new(msix_vectors: u16, vm_socket: Tube, pci_id: u32, device_name: String) -> Self {
82         assert!(msix_vectors <= MAX_MSIX_VECTORS_PER_DEVICE);
83 
84         let mut table_entries: Vec<MsixTableEntry> = Vec::new();
85         table_entries.resize_with(msix_vectors as usize, Default::default);
86         table_entries
87             .iter_mut()
88             .for_each(|entry| entry.vector_ctl |= MSIX_TABLE_ENTRY_MASK_BIT);
89         let mut pba_entries: Vec<u64> = Vec::new();
90         let num_pba_entries: usize =
91             ((msix_vectors as usize) + BITS_PER_PBA_ENTRY - 1) / BITS_PER_PBA_ENTRY;
92         pba_entries.resize_with(num_pba_entries, Default::default);
93 
94         let mut irq_vec = Vec::new();
95         irq_vec.resize_with(msix_vectors.into(), || None::<IrqfdGsi>);
96 
97         MsixConfig {
98             table_entries,
99             pba_entries,
100             irq_vec,
101             masked: false,
102             enabled: false,
103             msi_device_socket: vm_socket,
104             msix_num: msix_vectors,
105             pci_id,
106             device_name,
107         }
108     }
109 
110     /// Get the number of MSI-X vectors in this configuration.
num_vectors(&self) -> u16111     pub fn num_vectors(&self) -> u16 {
112         self.msix_num
113     }
114 
115     /// Check whether the Function Mask bit in Message Control word in set or not.
116     /// if 1, all of the vectors associated with the function are masked,
117     /// regardless of their per-vector Mask bit states.
118     /// If 0, each vector's Mask bit determines whether the vector is masked or not.
masked(&self) -> bool119     pub fn masked(&self) -> bool {
120         self.masked
121     }
122 
123     /// Check whether the Function Mask bit in MSIX table Message Control
124     /// word in set or not.
125     /// If true, the vector is masked.
126     /// If false, the vector is unmasked.
table_masked(&self, index: usize) -> bool127     pub fn table_masked(&self, index: usize) -> bool {
128         if index >= self.table_entries.len() {
129             true
130         } else {
131             self.table_entries[index].masked()
132         }
133     }
134 
135     /// Check whether the MSI-X Enable bit in Message Control word in set or not.
136     /// if 1, the function is permitted to use MSI-X to request service.
enabled(&self) -> bool137     pub fn enabled(&self) -> bool {
138         self.enabled
139     }
140 
141     /// Read the MSI-X Capability Structure.
142     /// The top 2 bits in Message Control word are emulated and all other
143     /// bits are read only.
read_msix_capability(&self, data: u32) -> u32144     pub fn read_msix_capability(&self, data: u32) -> u32 {
145         let mut msg_ctl = (data >> 16) as u16;
146         msg_ctl &= !(MSIX_ENABLE_BIT | FUNCTION_MASK_BIT);
147 
148         if self.enabled {
149             msg_ctl |= MSIX_ENABLE_BIT;
150         }
151         if self.masked {
152             msg_ctl |= FUNCTION_MASK_BIT;
153         }
154         (msg_ctl as u32) << 16 | (data & u16::max_value() as u32)
155     }
156 
157     /// Write to the MSI-X Capability Structure.
158     /// Only the top 2 bits in Message Control Word are writable.
write_msix_capability(&mut self, offset: u64, data: &[u8]) -> MsixStatus159     pub fn write_msix_capability(&mut self, offset: u64, data: &[u8]) -> MsixStatus {
160         if offset == 2 && data.len() == 2 {
161             let reg = u16::from_le_bytes([data[0], data[1]]);
162             let old_masked = self.masked;
163             let old_enabled = self.enabled;
164 
165             self.masked = (reg & FUNCTION_MASK_BIT) == FUNCTION_MASK_BIT;
166             self.enabled = (reg & MSIX_ENABLE_BIT) == MSIX_ENABLE_BIT;
167 
168             if !old_enabled && self.enabled {
169                 if let Err(e) = self.msix_enable_all() {
170                     error!("failed to enable MSI-X: {}", e);
171                     self.enabled = false;
172                 }
173             }
174 
175             // If the Function Mask bit was set, and has just been cleared, it's
176             // important to go through the entire PBA to check if there was any
177             // pending MSI-X message to inject, given that the vector is not
178             // masked.
179             if old_masked && !self.masked {
180                 for (index, entry) in self.table_entries.clone().iter().enumerate() {
181                     if !entry.masked() && self.get_pba_bit(index as u16) == 1 {
182                         self.inject_msix_and_clear_pba(index);
183                     }
184                 }
185                 return MsixStatus::Changed;
186             } else if !old_masked && self.masked {
187                 return MsixStatus::Changed;
188             }
189         } else {
190             error!(
191                 "invalid write to MSI-X Capability Structure offset {:x}",
192                 offset
193             );
194         }
195         MsixStatus::NothingToDo
196     }
197 
add_msi_route(&self, index: u16, gsi: u32) -> MsixResult<()>198     fn add_msi_route(&self, index: u16, gsi: u32) -> MsixResult<()> {
199         let mut data: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
200         self.read_msix_table((index * 16).into(), data.as_mut());
201         let msi_address: u64 = u64::from_le_bytes(data);
202         let mut data: [u8; 4] = [0, 0, 0, 0];
203         self.read_msix_table((index * 16 + 8).into(), data.as_mut());
204         let msi_data: u32 = u32::from_le_bytes(data);
205 
206         if msi_address == 0 {
207             return Ok(());
208         }
209 
210         self.msi_device_socket
211             .send(&VmIrqRequest::AddMsiRoute {
212                 gsi,
213                 msi_address,
214                 msi_data,
215             })
216             .map_err(MsixError::AddMsiRouteSend)?;
217         if let VmIrqResponse::Err(e) = self
218             .msi_device_socket
219             .recv()
220             .map_err(MsixError::AddMsiRouteRecv)?
221         {
222             return Err(MsixError::AddMsiRoute(e));
223         }
224         Ok(())
225     }
226 
227     // Enable MSI-X
msix_enable_all(&mut self) -> MsixResult<()>228     fn msix_enable_all(&mut self) -> MsixResult<()> {
229         for index in 0..self.irq_vec.len() {
230             self.msix_enable_one(index)?;
231         }
232         Ok(())
233     }
234 
235     // Use a new MSI-X vector
236     // Create a new eventfd and bind them to a new msi
msix_enable_one(&mut self, index: usize) -> MsixResult<()>237     fn msix_enable_one(&mut self, index: usize) -> MsixResult<()> {
238         if self.irq_vec[index].is_some()
239             || !self.enabled()
240             || self.masked()
241             || self.table_masked(index)
242         {
243             return Ok(());
244         }
245         let irqfd = Event::new().map_err(MsixError::AllocateOneMsi)?;
246         let request = VmIrqRequest::AllocateOneMsi {
247             irqfd,
248             device_id: self.pci_id,
249             queue_id: index as usize,
250             device_name: self.device_name.clone(),
251         };
252         self.msi_device_socket
253             .send(&request)
254             .map_err(MsixError::AllocateOneMsiSend)?;
255         let irq_num: u32 = match self
256             .msi_device_socket
257             .recv()
258             .map_err(MsixError::AllocateOneMsiRecv)?
259         {
260             VmIrqResponse::AllocateOneMsi { gsi } => gsi,
261             VmIrqResponse::Err(e) => return Err(MsixError::AllocateOneMsi(e)),
262             _ => unreachable!(),
263         };
264         self.irq_vec[index] = Some(IrqfdGsi {
265             irqfd: match request {
266                 VmIrqRequest::AllocateOneMsi { irqfd, .. } => irqfd,
267                 _ => unreachable!(),
268             },
269             gsi: irq_num,
270         });
271 
272         self.add_msi_route(index as u16, irq_num)?;
273         Ok(())
274     }
275 
276     /// Read MSI-X table
277     ///  # Arguments
278     ///  * 'offset' - the offset within the MSI-X Table
279     ///  * 'data' - used to store the read results
280     ///
281     /// For all accesses to MSI-X Table and MSI-X PBA fields, software must use aligned full
282     /// DWORD or aligned full QWORD transactions; otherwise, the result is undefined.
283     ///
284     ///   location: DWORD3            DWORD2      DWORD1            DWORD0
285     ///   entry 0:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
286     ///   entry 1:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
287     ///   entry 2:  Vector Control    Msg Data    Msg Upper Addr    Msg Addr
288     ///   ...
read_msix_table(&self, offset: u64, data: &mut [u8])289     pub fn read_msix_table(&self, offset: u64, data: &mut [u8]) {
290         let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
291         let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
292 
293         match data.len() {
294             4 => {
295                 let value = match modulo_offset {
296                     0x0 => self.table_entries[index].msg_addr_lo,
297                     0x4 => self.table_entries[index].msg_addr_hi,
298                     0x8 => self.table_entries[index].msg_data,
299                     0xc => self.table_entries[index].vector_ctl,
300                     _ => {
301                         error!("invalid offset");
302                         0
303                     }
304                 };
305 
306                 data.copy_from_slice(&value.to_le_bytes());
307             }
308             8 => {
309                 let value = match modulo_offset {
310                     0x0 => {
311                         (u64::from(self.table_entries[index].msg_addr_hi) << 32)
312                             | u64::from(self.table_entries[index].msg_addr_lo)
313                     }
314                     0x8 => {
315                         (u64::from(self.table_entries[index].vector_ctl) << 32)
316                             | u64::from(self.table_entries[index].msg_data)
317                     }
318                     _ => {
319                         error!("invalid offset");
320                         0
321                     }
322                 };
323 
324                 data.copy_from_slice(&value.to_le_bytes());
325             }
326             _ => error!("invalid data length"),
327         };
328     }
329 
330     /// Write to MSI-X table
331     ///
332     /// Message Address: the contents of this field specifies the address
333     ///     for the memory write transaction; different MSI-X vectors have
334     ///     different Message Address values
335     /// Message Data: the contents of this field specifies the data driven
336     ///     on AD[31::00] during the memory write transaction's data phase.
337     /// Vector Control: only bit 0 (Mask Bit) is not reserved: when this bit
338     ///     is set, the function is prohibited from sending a message using
339     ///     this MSI-X Table entry.
write_msix_table(&mut self, offset: u64, data: &[u8]) -> MsixStatus340     pub fn write_msix_table(&mut self, offset: u64, data: &[u8]) -> MsixStatus {
341         let index: usize = (offset / MSIX_TABLE_ENTRIES_MODULO) as usize;
342         let modulo_offset = offset % MSIX_TABLE_ENTRIES_MODULO;
343 
344         // Store the value of the entry before modification
345         let old_entry = self.table_entries[index].clone();
346 
347         match data.len() {
348             4 => {
349                 let value = u32::from_le_bytes(data.try_into().unwrap());
350                 match modulo_offset {
351                     0x0 => self.table_entries[index].msg_addr_lo = value,
352                     0x4 => self.table_entries[index].msg_addr_hi = value,
353                     0x8 => self.table_entries[index].msg_data = value,
354                     0xc => self.table_entries[index].vector_ctl = value,
355                     _ => error!("invalid offset"),
356                 };
357             }
358             8 => {
359                 let value = u64::from_le_bytes(data.try_into().unwrap());
360                 match modulo_offset {
361                     0x0 => {
362                         self.table_entries[index].msg_addr_lo = (value & 0xffff_ffffu64) as u32;
363                         self.table_entries[index].msg_addr_hi = (value >> 32) as u32;
364                     }
365                     0x8 => {
366                         self.table_entries[index].msg_data = (value & 0xffff_ffffu64) as u32;
367                         self.table_entries[index].vector_ctl = (value >> 32) as u32;
368                     }
369                     _ => error!("invalid offset"),
370                 };
371             }
372             _ => error!("invalid data length"),
373         };
374 
375         let new_entry = self.table_entries[index].clone();
376 
377         // This MSI-X vector is enabled for the first time.
378         if self.enabled()
379             && !self.masked()
380             && self.irq_vec[index].is_none()
381             && old_entry.masked()
382             && !new_entry.masked()
383         {
384             if let Err(e) = self.msix_enable_one(index) {
385                 error!("failed to enable MSI-X vector {}: {}", index, e);
386                 self.table_entries[index].vector_ctl |= MSIX_TABLE_ENTRY_MASK_BIT;
387             }
388             return MsixStatus::EntryChanged(index);
389         }
390 
391         if self.enabled()
392             && (old_entry.msg_addr_lo != new_entry.msg_addr_lo
393                 || old_entry.msg_addr_hi != new_entry.msg_addr_hi
394                 || old_entry.msg_data != new_entry.msg_data)
395         {
396             if let Some(irqfd_gsi) = &self.irq_vec[index] {
397                 let irq_num = irqfd_gsi.gsi;
398                 if let Err(e) = self.add_msi_route(index as u16, irq_num) {
399                     error!("add_msi_route failed: {}", e);
400                 }
401             }
402         }
403 
404         // After the MSI-X table entry has been updated, it is necessary to
405         // check if the vector control masking bit has changed. In case the
406         // bit has been flipped from 1 to 0, we need to inject a MSI message
407         // if the corresponding pending bit from the PBA is set. Once the MSI
408         // has been injected, the pending bit in the PBA needs to be cleared.
409         // All of this is valid only if MSI-X has not been masked for the whole
410         // device.
411 
412         // Check if bit has been flipped
413         if !self.masked() {
414             if old_entry.masked() && !self.table_entries[index].masked() {
415                 if self.get_pba_bit(index as u16) == 1 {
416                     self.inject_msix_and_clear_pba(index);
417                 }
418                 return MsixStatus::EntryChanged(index);
419             } else if !old_entry.masked() && self.table_entries[index].masked() {
420                 return MsixStatus::EntryChanged(index);
421             }
422         }
423         MsixStatus::NothingToDo
424     }
425 
426     /// Read PBA Entries
427     ///  # Arguments
428     ///  * 'offset' - the offset within the PBA entries
429     ///  * 'data' - used to store the read results
430     ///
431     /// Pending Bits[63::00]: For each Pending Bit that is set, the function
432     /// has a pending message for the associated MSI-X Table entry.
read_pba_entries(&self, offset: u64, data: &mut [u8])433     pub fn read_pba_entries(&self, offset: u64, data: &mut [u8]) {
434         let index: usize = (offset / MSIX_PBA_ENTRIES_MODULO) as usize;
435         let modulo_offset = offset % MSIX_PBA_ENTRIES_MODULO;
436 
437         match data.len() {
438             4 => {
439                 let value: u32 = match modulo_offset {
440                     0x0 => (self.pba_entries[index] & 0xffff_ffffu64) as u32,
441                     0x4 => (self.pba_entries[index] >> 32) as u32,
442                     _ => {
443                         error!("invalid offset");
444                         0
445                     }
446                 };
447 
448                 data.copy_from_slice(&value.to_le_bytes());
449             }
450             8 => {
451                 let value: u64 = match modulo_offset {
452                     0x0 => self.pba_entries[index],
453                     _ => {
454                         error!("invalid offset");
455                         0
456                     }
457                 };
458 
459                 data.copy_from_slice(&value.to_le_bytes());
460             }
461             _ => error!("invalid data length"),
462         }
463     }
464 
465     /// Write to PBA Entries
466     ///
467     /// Software should never write, and should only read Pending Bits.
468     /// If software writes to Pending Bits, the result is undefined.
write_pba_entries(&mut self, _offset: u64, _data: &[u8])469     pub fn write_pba_entries(&mut self, _offset: u64, _data: &[u8]) {
470         error!("Pending Bit Array is read only");
471     }
472 
set_pba_bit(&mut self, vector: u16, set: bool)473     fn set_pba_bit(&mut self, vector: u16, set: bool) {
474         assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
475 
476         let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
477         let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
478         let mut mask: u64 = (1 << shift) as u64;
479 
480         if set {
481             self.pba_entries[index] |= mask;
482         } else {
483             mask = !mask;
484             self.pba_entries[index] &= mask;
485         }
486     }
487 
get_pba_bit(&self, vector: u16) -> u8488     fn get_pba_bit(&self, vector: u16) -> u8 {
489         assert!(vector < MAX_MSIX_VECTORS_PER_DEVICE);
490 
491         let index: usize = (vector as usize) / BITS_PER_PBA_ENTRY;
492         let shift: usize = (vector as usize) % BITS_PER_PBA_ENTRY;
493 
494         ((self.pba_entries[index] >> shift) & 0x0000_0001u64) as u8
495     }
496 
inject_msix_and_clear_pba(&mut self, vector: usize)497     fn inject_msix_and_clear_pba(&mut self, vector: usize) {
498         if let Some(irq) = &self.irq_vec[vector] {
499             irq.irqfd.write(1).unwrap();
500         }
501 
502         // Clear the bit from PBA
503         self.set_pba_bit(vector as u16, false);
504     }
505 
506     /// Inject virtual interrupt to the guest
507     ///
508     ///  # Arguments
509     ///  * 'vector' - the index to the MSI-X Table entry
510     ///
511     /// PCI Spec 3.0 6.8.3.5: while a vector is masked, the function is
512     /// prohibited from sending the associated message, and the function
513     /// must set the associated Pending bit whenever the function would
514     /// otherwise send the message. When software unmasks a vector whose
515     /// associated Pending bit is set, the function must schedule sending
516     /// the associated message, and clear the Pending bit as soon as the
517     /// message has been sent.
518     ///
519     /// If the vector is unmasked, writing to irqfd which wakes up KVM to
520     /// inject virtual interrupt to the guest.
trigger(&mut self, vector: u16)521     pub fn trigger(&mut self, vector: u16) {
522         if self.table_entries[vector as usize].masked() || self.masked() {
523             self.set_pba_bit(vector, true);
524         } else if let Some(irq) = self.irq_vec.get(vector as usize).unwrap_or(&None) {
525             irq.irqfd.write(1).unwrap();
526         }
527     }
528 
529     /// Return the raw descriptor of the MSI device socket
get_msi_socket(&self) -> RawDescriptor530     pub fn get_msi_socket(&self) -> RawDescriptor {
531         self.msi_device_socket.as_raw_descriptor()
532     }
533 
534     /// Return irqfd of MSI-X Table entry
535     ///
536     ///  # Arguments
537     ///  * 'vector' - the index to the MSI-X table entry
get_irqfd(&self, vector: usize) -> Option<&Event>538     pub fn get_irqfd(&self, vector: usize) -> Option<&Event> {
539         match self.irq_vec.get(vector as usize).unwrap_or(&None) {
540             Some(irq) => Some(&irq.irqfd),
541             None => None,
542         }
543     }
544 
destroy(&mut self)545     pub fn destroy(&mut self) {
546         while let Some(irq) = self.irq_vec.pop() {
547             if let Some(irq) = irq {
548                 let request = VmIrqRequest::ReleaseOneIrq {
549                     gsi: irq.gsi,
550                     irqfd: irq.irqfd,
551                 };
552                 if self.msi_device_socket.send(&request).is_err() {
553                     continue;
554                 }
555                 let _ = self.msi_device_socket.recv::<VmIrqResponse>();
556             }
557         }
558     }
559 }
560 
561 impl AsRawDescriptor for MsixConfig {
as_raw_descriptor(&self) -> RawDescriptor562     fn as_raw_descriptor(&self) -> RawDescriptor {
563         self.msi_device_socket.as_raw_descriptor()
564     }
565 }
566 
/// Message Control Register
//   10-0:  MSI-X Table size
//   13-11: Reserved
//   14:    Mask. Mask all MSI-X when set.
//   15:    Enable. Enable all MSI-X when set.
// See <https://wiki.osdev.org/PCI#Enabling_MSI-X> for the details.
// NOTE(review): the comment above describes an 11-bit Table size field
// (bits 10-0), but `table_size` below is declared as B10 with 4 reserved
// bits — verify the bit_field widths match the intended register layout.
#[bitfield]
#[derive(Copy, Clone, Default)]
pub struct MsixCtrl {
    table_size: B10,
    reserved: B4,
    mask: B1,
    enable: B1,
}
581 
// It is safe to implement DataInit; all members are simple numbers and any value is valid.
// SAFETY: MsixCap is #[repr(C)] and built from plain integer data, so any
// byte pattern is a valid instance.
unsafe impl DataInit for MsixCap {}
584 
#[allow(dead_code)]
#[repr(C)]
#[derive(Clone, Copy, Default)]
/// MSI-X Capability Structure
// Field order mirrors the capability's config-space layout, since the raw
// bytes of this struct are exposed via PciCapability::bytes().
pub struct MsixCap {
    // To make add_capability() happy
    _cap_vndr: u8,
    _cap_next: u8,
    // Message Control Register
    msg_ctl: MsixCtrl,
    // Table. Contains the offset and the BAR indicator (BIR)
    //   2-0:  Table BAR indicator (BIR). Can be 0 to 5.
    //   31-3: Table offset in the BAR pointed by the BIR.
    table: u32,
    // Pending Bit Array. Contains the offset and the BAR indicator (BIR)
    //   2-0:  PBA BAR indicator (BIR). Can be 0 to 5.
    //   31-3: PBA offset in the BAR pointed by the BIR.
    pba: u32,
}
604 
impl PciCapability for MsixCap {
    // Raw bytes of the capability structure as written into config space.
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    // This capability is MSI-X.
    fn id(&self) -> PciCapabilityID {
        PciCapabilityID::Msix
    }

    // One mask per 32-bit word of the capability; set bits are guest
    // writable.
    fn writable_bits(&self) -> Vec<u32> {
        // Only msg_ctl[15:14] is writable
        // NOTE(review): msg_ctl occupies bits 31:16 of the first dword, so a
        // mask for its bits 15:14 would read as 0xc000_0000 — confirm how
        // add_capability() interprets these masks before changing the value.
        vec![0x3000_0000, 0, 0]
    }
}
619 
620 impl MsixCap {
new( table_pci_bar: u8, table_size: u16, table_off: u32, pba_pci_bar: u8, pba_off: u32, ) -> Self621     pub fn new(
622         table_pci_bar: u8,
623         table_size: u16,
624         table_off: u32,
625         pba_pci_bar: u8,
626         pba_off: u32,
627     ) -> Self {
628         assert!(table_size < MAX_MSIX_VECTORS_PER_DEVICE);
629 
630         // Set the table size and enable MSI-X.
631         let mut msg_ctl = MsixCtrl::new();
632         msg_ctl.set_enable(1);
633         // Table Size is N - 1 encoded.
634         msg_ctl.set_table_size(table_size - 1);
635 
636         MsixCap {
637             _cap_vndr: 0,
638             _cap_next: 0,
639             msg_ctl,
640             table: (table_off & 0xffff_fff8u32) | u32::from(table_pci_bar & 0x7u8),
641             pba: (pba_off & 0xffff_fff8u32) | u32::from(pba_pci_bar & 0x7u8),
642         }
643     }
644 
645     #[cfg(unix)]
msg_ctl(&self) -> MsixCtrl646     pub fn msg_ctl(&self) -> MsixCtrl {
647         self.msg_ctl
648     }
649 }
650