// Copyright 2022, The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Functions to scan the PCI bus for VirtIO devices.

use aarch64_paging::paging::MemoryRegion;
use alloc::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use core::{mem::size_of, ptr::NonNull};
use fdtpci::PciInfo;
use log::{debug, info};
use virtio_drivers::{
    device::{blk::VirtIOBlk, console::VirtIOConsole},
    transport::{
        pci::{bus::PciRoot, virtio_device_type, PciTransport},
        DeviceType, Transport,
    },
    BufferDirection, Hal, PhysAddr, PAGE_SIZE,
};

/// The standard sector size of a VirtIO block device, in bytes.
const SECTOR_SIZE_BYTES: usize = 512;

/// The size in sectors of the test block device we expect.
const EXPECTED_SECTOR_COUNT: usize = 4;

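/// Enumerates all devices on PCI bus 0, checks each VirtIO device that we know how to check, and
/// asserts that the expected number of VirtIO devices were checked successfully.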
pub fn check_pci(pci_root: &mut PciRoot) {
    let mut checked_virtio_device_count = 0;
    for (device_function, info) in pci_root.enumerate_bus(0) {
        let (status, command) = pci_root.get_status_command(device_function);
        info!("Found {} at {}, status {:?} command {:?}", info, device_function, status, command);
        if let Some(virtio_type) = virtio_device_type(&info) {
            info!("  VirtIO {:?}", virtio_type);
            let mut transport = PciTransport::new::<HalImpl>(pci_root, device_function).unwrap();
            info!(
                "Detected virtio PCI device with device type {:?}, features {:#018x}",
                transport.device_type(),
                transport.read_device_features(),
            );
            if check_virtio_device(transport, virtio_type) {
                checked_virtio_device_count += 1;
            }
        }
    }

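    // The test environment should expose exactly 4 VirtIO devices that we know how to check.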
    assert_eq!(checked_virtio_device_count, 4);
}

/// Checks the given VirtIO device, if we know how to.
///
/// Returns true if the device was checked, or false if it was ignored.
fn check_virtio_device(transport: impl Transport, device_type: DeviceType) -> bool {
    match device_type {
        DeviceType::Block => {
            let mut blk =
                VirtIOBlk::<HalImpl, _>::new(transport).expect("failed to create blk driver");
            info!("Found {} KiB block device.", blk.capacity() * SECTOR_SIZE_BYTES as u64 / 1024);
            assert_eq!(blk.capacity(), EXPECTED_SECTOR_COUNT as u64);
            let mut data = [0; SECTOR_SIZE_BYTES * EXPECTED_SECTOR_COUNT];
            for i in 0..EXPECTED_SECTOR_COUNT {
                blk.read_block(i, &mut data[i * SECTOR_SIZE_BYTES..(i + 1) * SECTOR_SIZE_BYTES])
                    .expect("Failed to read block device.");
            }
            for (i, chunk) in data.chunks(size_of::<u32>()).enumerate() {
                assert_eq!(chunk, &(i as u32).to_le_bytes());
            }
            info!("Read expected data from block device.");
            true
        }
        DeviceType::Console => {
            let mut console = VirtIOConsole::<HalImpl, _>::new(transport)
                .expect("Failed to create VirtIO console driver");
            info!("Found console device: {:?}", console.info());
            for &c in b"Hello VirtIO console\n" {
                console.send(c).expect("Failed to send character to VirtIO console device");
            }
            info!("Wrote to VirtIO console.");
            true
        }
        _ => false,
    }
}

/// Gets the memory region in which BARs are allocated.
pub fn get_bar_region(pci_info: &PciInfo) -> MemoryRegion {
    MemoryRegion::new(pci_info.bar_range.start as usize, pci_info.bar_range.end as usize)
}

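/// VirtIO `Hal` implementation backed by the global allocator. It relies on memory being
/// identity-mapped, so virtual addresses are used directly as physical addresses and vice versa.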
struct HalImpl;

unsafe impl Hal for HalImpl {
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        debug!("dma_alloc: pages={}", pages);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        // Safe because the layout has a non-zero size.
        let vaddr = unsafe { alloc(layout) };
        let vaddr =
            if let Some(vaddr) = NonNull::new(vaddr) { vaddr } else { handle_alloc_error(layout) };
        let paddr = virt_to_phys(vaddr);
        (paddr, vaddr)
    }

    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        debug!("dma_dealloc: paddr={:#x}, pages={}", paddr, pages);
        let layout = Layout::from_size_align(pages * PAGE_SIZE, PAGE_SIZE).unwrap();
        // Safe because the memory was allocated by `dma_alloc` above using the same allocator, and
        // the layout is the same as was used then.
        unsafe {
            dealloc(vaddr.as_ptr(), layout);
        }
        0
    }

    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, _size: usize) -> NonNull<u8> {
        NonNull::new(paddr as _).unwrap()
    }

    unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
        let vaddr = buffer.cast();
        // Nothing to do, as the host already has access to all memory.
        virt_to_phys(vaddr)
    }

    unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
        // Nothing to do, as the host already has access to all memory and we didn't copy the buffer
        // anywhere else.
    }
}

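/// Converts a virtual address to the corresponding physical address. This relies on memory being
/// identity-mapped, so the conversion is a simple cast.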
fn virt_to_phys(vaddr: NonNull<u8>) -> PhysAddr {
    vaddr.as_ptr() as _
}