// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use sync::Mutex;

use base::{warn, AsRawDescriptor, Event, RawDescriptor, Result, Tube};
use data_model::{DataInit, Le32};
use hypervisor::Datamatch;
use libc::ERANGE;
use resources::{Alloc, MmioType, SystemAllocator};
use vm_memory::GuestMemory;

use super::*;
use crate::pci::{
    MsixCap, MsixConfig, PciAddress, PciBarConfiguration, PciCapability, PciCapabilityID,
    PciClassCode, PciConfiguration, PciDevice, PciDeviceError, PciDisplaySubclass, PciHeaderType,
    PciInterruptPin, PciSubclass,
};

use self::virtio_pci_common_config::VirtioPciCommonConfig;

/// Values for the `cfg_type` field of a virtio PCI capability, identifying
/// which configuration structure the capability points at. The numeric
/// values are defined by the virtio specification (note the gap before
/// `SharedMemoryConfig`: values 6 and 7 are not used here).
pub enum PciCapabilityType {
    CommonConfig = 1,
    NotifyConfig = 2,
    IsrConfig = 3,
    DeviceConfig = 4,
    PciConfig = 5,
    SharedMemoryConfig = 8,
}

/// In-memory image of the virtio PCI capability structure (`virtio_pci_cap`
/// in the virtio spec). `#[repr(C)]` so the field layout matches what the
/// guest expects when reading PCI configuration space; do not reorder fields.
#[allow(dead_code)]
#[repr(C)]
#[derive(Clone, Copy)]
struct VirtioPciCap {
    // _cap_vndr and _cap_next are autofilled based on id() in pci configuration
    _cap_vndr: u8,    // Generic PCI field: PCI_CAP_ID_VNDR
    _cap_next: u8,    // Generic PCI field: next ptr
    cap_len: u8,      // Generic PCI field: capability length
    cfg_type: u8,     // Identifies the structure.
    bar: u8,          // Where to find it.
    id: u8,           // Multiple capabilities of the same type
    padding: [u8; 2], // Pad to full dword.
    offset: Le32,     // Offset within bar.
    length: Le32,     // Length of the structure, in bytes.
}
// It is safe to implement DataInit; all members are simple numbers and any value is valid.
unsafe impl DataInit for VirtioPciCap {}

impl PciCapability for VirtioPciCap {
    /// Raw bytes of the capability as written into PCI config space.
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    /// All virtio capabilities use the vendor-specific PCI capability ID.
    fn id(&self) -> PciCapabilityID {
        PciCapabilityID::VendorSpecific
    }
}

impl VirtioPciCap {
    /// Builds a capability of the given `cfg_type` describing `length` bytes
    /// at `offset` within BAR `bar`. The vendor/next-pointer fields are left
    /// zero; they are filled in when the capability is added to the PCI
    /// configuration.
    pub fn new(cfg_type: PciCapabilityType, bar: u8, offset: u32, length: u32) -> Self {
        VirtioPciCap {
            _cap_vndr: 0,
            _cap_next: 0,
            cap_len: std::mem::size_of::<VirtioPciCap>() as u8,
            cfg_type: cfg_type as u8,
            bar,
            id: 0,
            padding: [0; 2],
            offset: Le32::from(offset),
            length: Le32::from(length),
        }
    }
}

/// Virtio notification capability: the base `VirtioPciCap` plus the
/// per-queue notify offset multiplier (`virtio_pci_notify_cap` in the spec).
/// `#[repr(C)]` so the guest-visible layout is exactly the spec layout.
#[allow(dead_code)]
#[repr(C)]
#[derive(Clone, Copy)]
pub struct VirtioPciNotifyCap {
    cap: VirtioPciCap,
    notify_off_multiplier: Le32,
}
// It is safe to implement DataInit; all members are simple numbers and any value is valid.
unsafe impl DataInit for VirtioPciNotifyCap {}

impl PciCapability for VirtioPciNotifyCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityID {
        PciCapabilityID::VendorSpecific
    }
}

impl VirtioPciNotifyCap {
    /// Builds a notify capability; `multiplier` is the stride, in bytes,
    /// between successive queues' notification addresses.
    pub fn new(
        cfg_type: PciCapabilityType,
        bar: u8,
        offset: u32,
        length: u32,
        multiplier: Le32,
    ) -> Self {
        VirtioPciNotifyCap {
            cap: VirtioPciCap {
                _cap_vndr: 0,
                _cap_next: 0,
                cap_len: std::mem::size_of::<VirtioPciNotifyCap>() as u8,
                cfg_type: cfg_type as u8,
                bar,
                id: 0,
                padding: [0; 2],
                offset: Le32::from(offset),
                length: Le32::from(length),
            },
            notify_off_multiplier: multiplier,
        }
    }
}

/// Shared-memory region capability: extends `VirtioPciCap` with the high
/// 32 bits of the 64-bit offset and length. The low 32 bits live in the
/// embedded `cap`'s `offset`/`length` fields.
#[repr(C)]
#[derive(Clone, Copy)]
pub struct VirtioPciShmCap {
    cap: VirtioPciCap,
    offset_hi: Le32, // Most sig 32 bits of offset
    length_hi: Le32, // Most sig 32 bits of length
}
// It is safe to implement DataInit; all members are simple numbers and any value is valid.
unsafe impl DataInit for VirtioPciShmCap {}

impl PciCapability for VirtioPciShmCap {
    fn bytes(&self) -> &[u8] {
        self.as_slice()
    }

    fn id(&self) -> PciCapabilityID {
        PciCapabilityID::VendorSpecific
    }
}

impl VirtioPciShmCap {
    /// Builds a shared-memory capability for region `shmid`, splitting the
    /// 64-bit `offset` and `length` across the low (in `cap`) and high
    /// (`offset_hi`/`length_hi`) dwords.
    pub fn new(cfg_type: PciCapabilityType, bar: u8, offset: u64, length: u64, shmid: u8) -> Self {
        VirtioPciShmCap {
            cap: VirtioPciCap {
                _cap_vndr: 0,
                _cap_next: 0,
                cap_len: std::mem::size_of::<VirtioPciShmCap>() as u8,
                cfg_type: cfg_type as u8,
                bar,
                id: shmid,
                padding: [0; 2],
                offset: Le32::from(offset as u32),
                length: Le32::from(length as u32),
            },
            offset_hi: Le32::from((offset >> 32) as u32),
            length_hi: Le32::from((length >> 32) as u32),
        }
    }
}

/// Subclasses for virtio.
#[allow(dead_code)]
#[derive(Copy, Clone)]
pub enum PciVirtioSubclass {
    NonTransitionalBase = 0xff,
}

impl PciSubclass for PciVirtioSubclass {
    fn get_register_value(&self) -> u8 {
        *self as u8
    }
}

// Allocate one bar for the structs pointed to by the capability structures.
// Fixed layout of the settings BAR: each configuration structure gets its own
// page-aligned window, all within a single CAPABILITY_BAR_SIZE region.
const COMMON_CONFIG_BAR_OFFSET: u64 = 0x0000;
const COMMON_CONFIG_SIZE: u64 = 56;
const ISR_CONFIG_BAR_OFFSET: u64 = 0x1000;
const ISR_CONFIG_SIZE: u64 = 1;
const DEVICE_CONFIG_BAR_OFFSET: u64 = 0x2000;
const DEVICE_CONFIG_SIZE: u64 = 0x1000;
const NOTIFICATION_BAR_OFFSET: u64 = 0x3000;
const NOTIFICATION_SIZE: u64 = 0x1000;
const MSIX_TABLE_BAR_OFFSET: u64 = 0x6000;
const MSIX_TABLE_SIZE: u64 = 0x1000;
const MSIX_PBA_BAR_OFFSET: u64 = 0x7000;
const MSIX_PBA_SIZE: u64 = 0x1000;
const CAPABILITY_BAR_SIZE: u64 = 0x8000;

const NOTIFY_OFF_MULTIPLIER: u32 = 4; // A dword per notification address.

const VIRTIO_PCI_VENDOR_ID: u16 = 0x1af4;
const VIRTIO_PCI_DEVICE_ID_BASE: u16 = 0x1040; // Add to device type to get device ID.
const VIRTIO_PCI_REVISION_ID: u8 = 1;

/// Implements the
/// [PCI](http://docs.oasis-open.org/virtio/virtio/v1.0/cs04/virtio-v1.0-cs04.html#x1-650001)
/// transport for virtio devices.
pub struct VirtioPciDevice {
    // PCI configuration space registers (vendor/device IDs, BARs, capabilities).
    config_regs: PciConfiguration,
    // Assigned by allocate_address(); None until then.
    pci_address: Option<PciAddress>,

    // The wrapped virtio device implementation.
    device: Box<dyn VirtioDevice>,
    // Set once the device has been activated (driver ready + valid queues).
    device_activated: bool,

    // ISR status bits, shared with the Interrupt handed to the device.
    interrupt_status: Arc<AtomicUsize>,
    // Legacy INTx events; populated by assign_irq().
    interrupt_evt: Option<Event>,
    interrupt_resample_evt: Option<Event>,
    // One Queue/Event pair per queue the device declares.
    queues: Vec<Queue>,
    queue_evts: Vec<Event>,
    // Guest memory; taken when the device is activated.
    mem: Option<GuestMemory>,
    // Index of the BAR holding the capability structures.
    settings_bar: u8,
    msix_config: Arc<Mutex<MsixConfig>>,
    // Config register index of the MSI-X capability, once added.
    msix_cap_reg_idx: Option<usize>,
    common_config: VirtioPciCommonConfig,
}

impl VirtioPciDevice {
    /// Constructs a new PCI transport for the given virtio device.
    ///
    /// Creates one event and one `Queue` per queue the device declares, and
    /// an MSI-X config with one vector per queue plus one for configuration
    /// changes. Fails if an event cannot be created or if the vector count
    /// overflows a u16 (ERANGE).
    pub fn new(
        mem: GuestMemory,
        device: Box<dyn VirtioDevice>,
        msi_device_tube: Tube,
    ) -> Result<Self> {
        let mut queue_evts = Vec::new();
        for _ in device.queue_max_sizes() {
            queue_evts.push(Event::new()?)
        }
        let queues = device
            .queue_max_sizes()
            .iter()
            .map(|&s| Queue::new(s))
            .collect();

        // Device ID = virtio base + device type, per the virtio PCI transport.
        let pci_device_id = VIRTIO_PCI_DEVICE_ID_BASE + device.device_type() as u16;

        // GPUs advertise as display controllers; everything else uses the
        // generic virtio subclass.
        let (pci_device_class, pci_device_subclass) = match device.device_type() {
            TYPE_GPU => (
                PciClassCode::DisplayController,
                &PciDisplaySubclass::Other as &dyn PciSubclass,
            ),
            _ => (
                PciClassCode::Other,
                &PciVirtioSubclass::NonTransitionalBase as &dyn PciSubclass,
            ),
        };

        let num_queues = device.queue_max_sizes().len();

        // One MSI-X vector per queue plus one for configuration changes.
        let msix_num = u16::try_from(num_queues + 1).map_err(|_| base::Error::new(ERANGE))?;
        let msix_config = Arc::new(Mutex::new(MsixConfig::new(msix_num, msi_device_tube)));

        let config_regs = PciConfiguration::new(
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            pci_device_class,
            pci_device_subclass,
            None,
            PciHeaderType::Device,
            VIRTIO_PCI_VENDOR_ID,
            pci_device_id,
            VIRTIO_PCI_REVISION_ID,
        );

        Ok(VirtioPciDevice {
            config_regs,
            pci_address: None,
            device,
            device_activated: false,
            interrupt_status: Arc::new(AtomicUsize::new(0)),
            interrupt_evt: None,
            interrupt_resample_evt: None,
            queues,
            queue_evts,
            mem: Some(mem),
            settings_bar: 0,
            msix_config,
            msix_cap_reg_idx: None,
            common_config: VirtioPciCommonConfig {
                driver_status: 0,
                config_generation: 0,
                device_feature_select: 0,
                driver_feature_select: 0,
                queue_select: 0,
                msix_config: VIRTIO_MSI_NO_VECTOR,
            },
        })
    }

    /// True when the driver status register is exactly the four "ready"
    /// handshake bits. (The FAILED check is defensive: the equality above
    /// already excludes any other bit being set.)
    fn is_driver_ready(&self) -> bool {
        let ready_bits =
            (DEVICE_ACKNOWLEDGE | DEVICE_DRIVER | DEVICE_DRIVER_OK | DEVICE_FEATURES_OK) as u8;
        self.common_config.driver_status == ready_bits
            && self.common_config.driver_status & DEVICE_FAILED as u8 == 0
    }
the device reset itself is_reset_requested(&self) -> bool300 fn is_reset_requested(&self) -> bool { 301 self.common_config.driver_status == DEVICE_RESET as u8 302 } 303 are_queues_valid(&self) -> bool304 fn are_queues_valid(&self) -> bool { 305 if let Some(mem) = self.mem.as_ref() { 306 // All queues marked as ready must be valid. 307 self.queues 308 .iter() 309 .filter(|q| q.ready) 310 .all(|q| q.is_valid(mem)) 311 } else { 312 false 313 } 314 } 315 add_settings_pci_capabilities( &mut self, settings_bar: u8, ) -> std::result::Result<(), PciDeviceError>316 fn add_settings_pci_capabilities( 317 &mut self, 318 settings_bar: u8, 319 ) -> std::result::Result<(), PciDeviceError> { 320 // Add pointers to the different configuration structures from the PCI capabilities. 321 let common_cap = VirtioPciCap::new( 322 PciCapabilityType::CommonConfig, 323 settings_bar, 324 COMMON_CONFIG_BAR_OFFSET as u32, 325 COMMON_CONFIG_SIZE as u32, 326 ); 327 self.config_regs 328 .add_capability(&common_cap) 329 .map_err(PciDeviceError::CapabilitiesSetup)?; 330 331 let isr_cap = VirtioPciCap::new( 332 PciCapabilityType::IsrConfig, 333 settings_bar, 334 ISR_CONFIG_BAR_OFFSET as u32, 335 ISR_CONFIG_SIZE as u32, 336 ); 337 self.config_regs 338 .add_capability(&isr_cap) 339 .map_err(PciDeviceError::CapabilitiesSetup)?; 340 341 // TODO(dgreid) - set based on device's configuration size? 
342 let device_cap = VirtioPciCap::new( 343 PciCapabilityType::DeviceConfig, 344 settings_bar, 345 DEVICE_CONFIG_BAR_OFFSET as u32, 346 DEVICE_CONFIG_SIZE as u32, 347 ); 348 self.config_regs 349 .add_capability(&device_cap) 350 .map_err(PciDeviceError::CapabilitiesSetup)?; 351 352 let notify_cap = VirtioPciNotifyCap::new( 353 PciCapabilityType::NotifyConfig, 354 settings_bar, 355 NOTIFICATION_BAR_OFFSET as u32, 356 NOTIFICATION_SIZE as u32, 357 Le32::from(NOTIFY_OFF_MULTIPLIER), 358 ); 359 self.config_regs 360 .add_capability(¬ify_cap) 361 .map_err(PciDeviceError::CapabilitiesSetup)?; 362 363 //TODO(dgreid) - How will the configuration_cap work? 364 let configuration_cap = VirtioPciCap::new(PciCapabilityType::PciConfig, 0, 0, 0); 365 self.config_regs 366 .add_capability(&configuration_cap) 367 .map_err(PciDeviceError::CapabilitiesSetup)?; 368 369 let msix_cap = MsixCap::new( 370 settings_bar, 371 self.msix_config.lock().num_vectors(), 372 MSIX_TABLE_BAR_OFFSET as u32, 373 settings_bar, 374 MSIX_PBA_BAR_OFFSET as u32, 375 ); 376 let msix_offset = self 377 .config_regs 378 .add_capability(&msix_cap) 379 .map_err(PciDeviceError::CapabilitiesSetup)?; 380 self.msix_cap_reg_idx = Some(msix_offset / 4); 381 382 self.settings_bar = settings_bar; 383 Ok(()) 384 } 385 clone_queue_evts(&self) -> Result<Vec<Event>>386 fn clone_queue_evts(&self) -> Result<Vec<Event>> { 387 self.queue_evts.iter().map(|e| e.try_clone()).collect() 388 } 389 } 390 391 impl PciDevice for VirtioPciDevice { debug_label(&self) -> String392 fn debug_label(&self) -> String { 393 format!("pci{}", self.device.debug_label()) 394 } 395 allocate_address( &mut self, resources: &mut SystemAllocator, ) -> std::result::Result<PciAddress, PciDeviceError>396 fn allocate_address( 397 &mut self, 398 resources: &mut SystemAllocator, 399 ) -> std::result::Result<PciAddress, PciDeviceError> { 400 if self.pci_address.is_none() { 401 self.pci_address = match resources.allocate_pci(self.debug_label()) { 402 
Some(Alloc::PciBar { 403 bus, 404 dev, 405 func, 406 bar: _, 407 }) => Some(PciAddress { bus, dev, func }), 408 _ => None, 409 } 410 } 411 self.pci_address.ok_or(PciDeviceError::PciAllocationFailed) 412 } 413 keep_rds(&self) -> Vec<RawDescriptor>414 fn keep_rds(&self) -> Vec<RawDescriptor> { 415 let mut rds = self.device.keep_rds(); 416 if let Some(interrupt_evt) = &self.interrupt_evt { 417 rds.push(interrupt_evt.as_raw_descriptor()); 418 } 419 if let Some(interrupt_resample_evt) = &self.interrupt_resample_evt { 420 rds.push(interrupt_resample_evt.as_raw_descriptor()); 421 } 422 let descriptor = self.msix_config.lock().get_msi_socket(); 423 rds.push(descriptor); 424 rds 425 } 426 assign_irq( &mut self, irq_evt: Event, irq_resample_evt: Event, irq_num: u32, irq_pin: PciInterruptPin, )427 fn assign_irq( 428 &mut self, 429 irq_evt: Event, 430 irq_resample_evt: Event, 431 irq_num: u32, 432 irq_pin: PciInterruptPin, 433 ) { 434 self.config_regs.set_irq(irq_num as u8, irq_pin); 435 self.interrupt_evt = Some(irq_evt); 436 self.interrupt_resample_evt = Some(irq_resample_evt); 437 } 438 allocate_io_bars( &mut self, resources: &mut SystemAllocator, ) -> std::result::Result<Vec<(u64, u64)>, PciDeviceError>439 fn allocate_io_bars( 440 &mut self, 441 resources: &mut SystemAllocator, 442 ) -> std::result::Result<Vec<(u64, u64)>, PciDeviceError> { 443 let address = self 444 .pci_address 445 .expect("allocaten_address must be called prior to allocate_io_bars"); 446 // Allocate one bar for the structures pointed to by the capability structures. 
447 let mut ranges = Vec::new(); 448 let settings_config_addr = resources 449 .mmio_allocator(MmioType::Low) 450 .allocate_with_align( 451 CAPABILITY_BAR_SIZE, 452 Alloc::PciBar { 453 bus: address.bus, 454 dev: address.dev, 455 func: address.func, 456 bar: 0, 457 }, 458 format!( 459 "virtio-{}-cap_bar", 460 type_to_str(self.device.device_type()).unwrap_or("?") 461 ), 462 CAPABILITY_BAR_SIZE, 463 ) 464 .map_err(|e| PciDeviceError::IoAllocationFailed(CAPABILITY_BAR_SIZE, e))?; 465 let config = PciBarConfiguration::default() 466 .set_register_index(0) 467 .set_address(settings_config_addr) 468 .set_size(CAPABILITY_BAR_SIZE); 469 let settings_bar = self 470 .config_regs 471 .add_pci_bar(config) 472 .map_err(|e| PciDeviceError::IoRegistrationFailed(settings_config_addr, e))? 473 as u8; 474 ranges.push((settings_config_addr, CAPABILITY_BAR_SIZE)); 475 476 // Once the BARs are allocated, the capabilities can be added to the PCI configuration. 477 self.add_settings_pci_capabilities(settings_bar)?; 478 479 Ok(ranges) 480 } 481 allocate_device_bars( &mut self, resources: &mut SystemAllocator, ) -> std::result::Result<Vec<(u64, u64)>, PciDeviceError>482 fn allocate_device_bars( 483 &mut self, 484 resources: &mut SystemAllocator, 485 ) -> std::result::Result<Vec<(u64, u64)>, PciDeviceError> { 486 let address = self 487 .pci_address 488 .expect("allocaten_address must be called prior to allocate_device_bars"); 489 let mut ranges = Vec::new(); 490 for config in self.device.get_device_bars(address) { 491 let device_addr = resources 492 .mmio_allocator_any() 493 .allocate_with_align( 494 config.get_size(), 495 Alloc::PciBar { 496 bus: address.bus, 497 dev: address.dev, 498 func: address.func, 499 bar: config.get_register_index() as u8, 500 }, 501 format!( 502 "virtio-{}-custom_bar", 503 type_to_str(self.device.device_type()).unwrap_or("?") 504 ), 505 config.get_size(), 506 ) 507 .map_err(|e| PciDeviceError::IoAllocationFailed(config.get_size(), e))?; 508 let config = 
config.set_address(device_addr); 509 let _device_bar = self 510 .config_regs 511 .add_pci_bar(config) 512 .map_err(|e| PciDeviceError::IoRegistrationFailed(device_addr, e))?; 513 ranges.push((device_addr, config.get_size())); 514 } 515 Ok(ranges) 516 } 517 register_device_capabilities(&mut self) -> std::result::Result<(), PciDeviceError>518 fn register_device_capabilities(&mut self) -> std::result::Result<(), PciDeviceError> { 519 for cap in self.device.get_device_caps() { 520 self.config_regs 521 .add_capability(&*cap) 522 .map_err(PciDeviceError::CapabilitiesSetup)?; 523 } 524 525 Ok(()) 526 } 527 ioevents(&self) -> Vec<(&Event, u64, Datamatch)>528 fn ioevents(&self) -> Vec<(&Event, u64, Datamatch)> { 529 let bar0 = self.config_regs.get_bar_addr(self.settings_bar as usize); 530 let notify_base = bar0 + NOTIFICATION_BAR_OFFSET; 531 self.queue_evts 532 .iter() 533 .enumerate() 534 .map(|(i, event)| { 535 ( 536 event, 537 notify_base + i as u64 * NOTIFY_OFF_MULTIPLIER as u64, 538 Datamatch::AnyLength, 539 ) 540 }) 541 .collect() 542 } 543 read_config_register(&self, reg_idx: usize) -> u32544 fn read_config_register(&self, reg_idx: usize) -> u32 { 545 let mut data: u32 = self.config_regs.read_reg(reg_idx); 546 if let Some(msix_cap_reg_idx) = self.msix_cap_reg_idx { 547 if msix_cap_reg_idx == reg_idx { 548 data = self.msix_config.lock().read_msix_capability(data); 549 } 550 } 551 552 data 553 } 554 write_config_register(&mut self, reg_idx: usize, offset: u64, data: &[u8])555 fn write_config_register(&mut self, reg_idx: usize, offset: u64, data: &[u8]) { 556 if let Some(msix_cap_reg_idx) = self.msix_cap_reg_idx { 557 if msix_cap_reg_idx == reg_idx { 558 let behavior = self.msix_config.lock().write_msix_capability(offset, data); 559 self.device.control_notify(behavior); 560 } 561 } 562 563 (&mut self.config_regs).write_reg(reg_idx, offset, data) 564 } 565 566 // Clippy: the value of COMMON_CONFIG_BAR_OFFSET happens to be zero so the 567 // expression 
`COMMON_CONFIG_BAR_OFFSET <= o` is always true, but this code 568 // is written such that the value of the const may be changed independently. 569 #[allow(clippy::absurd_extreme_comparisons)] read_bar(&mut self, addr: u64, data: &mut [u8])570 fn read_bar(&mut self, addr: u64, data: &mut [u8]) { 571 // The driver is only allowed to do aligned, properly sized access. 572 let bar0 = self.config_regs.get_bar_addr(self.settings_bar as usize); 573 let offset = addr - bar0; 574 match offset { 575 o if COMMON_CONFIG_BAR_OFFSET <= o 576 && o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => 577 { 578 self.common_config.read( 579 o - COMMON_CONFIG_BAR_OFFSET, 580 data, 581 &mut self.queues, 582 self.device.as_mut(), 583 ) 584 } 585 o if ISR_CONFIG_BAR_OFFSET <= o && o < ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE => { 586 if let Some(v) = data.get_mut(0) { 587 // Reading this register resets it to 0. 588 *v = self.interrupt_status.swap(0, Ordering::SeqCst) as u8; 589 } 590 } 591 o if DEVICE_CONFIG_BAR_OFFSET <= o 592 && o < DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE => 593 { 594 self.device.read_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 595 } 596 o if NOTIFICATION_BAR_OFFSET <= o 597 && o < NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE => 598 { 599 // Handled with ioevents. 
600 } 601 602 o if MSIX_TABLE_BAR_OFFSET <= o && o < MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE => { 603 self.msix_config 604 .lock() 605 .read_msix_table(o - MSIX_TABLE_BAR_OFFSET, data); 606 } 607 608 o if MSIX_PBA_BAR_OFFSET <= o && o < MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE => { 609 self.msix_config 610 .lock() 611 .read_pba_entries(o - MSIX_PBA_BAR_OFFSET, data); 612 } 613 614 _ => (), 615 } 616 } 617 618 #[allow(clippy::absurd_extreme_comparisons)] write_bar(&mut self, addr: u64, data: &[u8])619 fn write_bar(&mut self, addr: u64, data: &[u8]) { 620 let bar0 = self.config_regs.get_bar_addr(self.settings_bar as usize); 621 let offset = addr - bar0; 622 match offset { 623 o if COMMON_CONFIG_BAR_OFFSET <= o 624 && o < COMMON_CONFIG_BAR_OFFSET + COMMON_CONFIG_SIZE => 625 { 626 self.common_config.write( 627 o - COMMON_CONFIG_BAR_OFFSET, 628 data, 629 &mut self.queues, 630 self.device.as_mut(), 631 ) 632 } 633 o if ISR_CONFIG_BAR_OFFSET <= o && o < ISR_CONFIG_BAR_OFFSET + ISR_CONFIG_SIZE => { 634 if let Some(v) = data.get(0) { 635 self.interrupt_status 636 .fetch_and(!(*v as usize), Ordering::SeqCst); 637 } 638 } 639 o if DEVICE_CONFIG_BAR_OFFSET <= o 640 && o < DEVICE_CONFIG_BAR_OFFSET + DEVICE_CONFIG_SIZE => 641 { 642 self.device.write_config(o - DEVICE_CONFIG_BAR_OFFSET, data); 643 } 644 o if NOTIFICATION_BAR_OFFSET <= o 645 && o < NOTIFICATION_BAR_OFFSET + NOTIFICATION_SIZE => 646 { 647 // Handled with ioevents. 
648 } 649 o if MSIX_TABLE_BAR_OFFSET <= o && o < MSIX_TABLE_BAR_OFFSET + MSIX_TABLE_SIZE => { 650 let behavior = self 651 .msix_config 652 .lock() 653 .write_msix_table(o - MSIX_TABLE_BAR_OFFSET, data); 654 self.device.control_notify(behavior); 655 } 656 o if MSIX_PBA_BAR_OFFSET <= o && o < MSIX_PBA_BAR_OFFSET + MSIX_PBA_SIZE => { 657 self.msix_config 658 .lock() 659 .write_pba_entries(o - MSIX_PBA_BAR_OFFSET, data); 660 } 661 662 _ => (), 663 }; 664 665 if !self.device_activated && self.is_driver_ready() && self.are_queues_valid() { 666 if let Some(interrupt_evt) = self.interrupt_evt.take() { 667 self.interrupt_evt = match interrupt_evt.try_clone() { 668 Ok(evt) => Some(evt), 669 Err(e) => { 670 warn!( 671 "{} failed to clone interrupt_evt: {}", 672 self.debug_label(), 673 e 674 ); 675 None 676 } 677 }; 678 if let Some(interrupt_resample_evt) = self.interrupt_resample_evt.take() { 679 self.interrupt_resample_evt = match interrupt_resample_evt.try_clone() { 680 Ok(evt) => Some(evt), 681 Err(e) => { 682 warn!( 683 "{} failed to clone interrupt_resample_evt: {}", 684 self.debug_label(), 685 e 686 ); 687 None 688 } 689 }; 690 if let Some(mem) = self.mem.take() { 691 self.mem = Some(mem.clone()); 692 let interrupt = Interrupt::new( 693 self.interrupt_status.clone(), 694 interrupt_evt, 695 interrupt_resample_evt, 696 Some(self.msix_config.clone()), 697 self.common_config.msix_config, 698 ); 699 700 match self.clone_queue_evts() { 701 Ok(queue_evts) => { 702 // Use ready queues and their events. 
703 let (queues, queue_evts) = self 704 .queues 705 .clone() 706 .into_iter() 707 .zip(queue_evts.into_iter()) 708 .filter(|(q, _)| q.ready) 709 .unzip(); 710 711 self.device.activate(mem, interrupt, queues, queue_evts); 712 self.device_activated = true; 713 } 714 Err(e) => { 715 warn!( 716 "{} not activate due to failed to clone queue_evts: {}", 717 self.debug_label(), 718 e 719 ); 720 } 721 } 722 } 723 } 724 } 725 } 726 727 // Device has been reset by the driver 728 if self.device_activated && self.is_reset_requested() && self.device.reset() { 729 self.device_activated = false; 730 // reset queues 731 self.queues.iter_mut().for_each(Queue::reset); 732 // select queue 0 by default 733 self.common_config.queue_select = 0; 734 } 735 } 736 on_device_sandboxed(&mut self)737 fn on_device_sandboxed(&mut self) { 738 self.device.on_device_sandboxed(); 739 } 740 } 741