• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2024 Google Inc. All rights reserved
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 use core::ops::Deref;
25 use core::ops::DerefMut;
26 use core::ptr::NonNull;
27 
28 use lazy_static::lazy_static;
29 
30 use hypervisor::mmio_map_region;
31 
32 use rust_support::mmu::ARCH_MMU_FLAG_PERM_NO_EXECUTE;
33 use rust_support::mmu::ARCH_MMU_FLAG_UNCACHED_DEVICE;
34 use rust_support::paddr_t;
35 use rust_support::sync::Mutex;
36 use rust_support::vaddr_t;
37 use rust_support::vmm::vmm_alloc_physical;
38 use rust_support::vmm::vmm_get_kernel_aspace;
39 use rust_support::Error as LkError;
40 
41 use static_assertions::const_assert_eq;
42 
43 use virtio_drivers_and_devices::transport::pci::bus::ConfigurationAccess;
44 use virtio_drivers_and_devices::transport::pci::bus::DeviceFunction;
45 use virtio_drivers_and_devices::transport::pci::bus::PciRoot;
46 use virtio_drivers_and_devices::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};
47 
48 use crate::err::Error;
49 use crate::pci::arch;
50 
/// Record of one PCI Base Address Register (BAR) mapping created by
/// `TrustyHal::mmio_alloc`: the physical range of the BAR and the
/// kernel virtual address it was mapped at.
#[derive(Copy, Clone)]
struct BarInfo {
    // Physical base address of the BAR.
    paddr: paddr_t,
    // Size of the mapping in bytes (rounded up to a page multiple).
    size: usize,
    // Kernel virtual address the BAR is mapped at.
    vaddr: vaddr_t,
}
57 
// A PCI type-0 configuration header has six BARs.
const NUM_BARS: usize = 6;
lazy_static! {
    /// Global table of BAR mappings, filled in by `TrustyHal::mmio_alloc`
    /// and consulted by `Hal::mmio_phys_to_virt` to translate device
    /// physical addresses back to kernel virtual addresses.
    static ref BARS: Mutex<[Option<BarInfo>; NUM_BARS]> = Mutex::new([None; NUM_BARS]);
}

// virtio-drivers requires 4k pages, check that we meet requirement
const_assert_eq!(PAGE_SIZE, rust_support::mmu::PAGE_SIZE as usize);
65 
66 pub struct TrustyHal;
67 
impl TrustyHal {
    /// Maps every memory BAR of `device_function` into the kernel address
    /// space and records each mapping in the global `BARS` table so that
    /// `Hal::mmio_phys_to_virt` can later translate device physical
    /// addresses to kernel virtual addresses.
    ///
    /// BARs without a memory address/size (e.g. I/O BARs or absent BARs)
    /// are skipped.
    ///
    /// # Errors
    ///
    /// Returns an error if `vmm_alloc_physical` fails, or if
    /// `mmio_map_region` fails with anything other than
    /// `ERR_NOT_SUPPORTED` or `ERR_INVALID_ARGS`.
    pub fn mmio_alloc(
        pci_root: &mut PciRoot<impl ConfigurationAccess>,
        device_function: DeviceFunction,
    ) -> Result<(), Error> {
        for bar in 0..NUM_BARS {
            let bar_info = pci_root.bar_info(device_function, bar as u8).unwrap();
            // Only memory BARs report an (address, size) pair; skip the rest.
            if let Some((bar_paddr, bar_size)) = bar_info.memory_address_size() {
                // Out-parameter: filled in by `vmm_alloc_physical` below.
                let bar_vaddr = core::ptr::null_mut();
                // Round the BAR size up to a whole number of pages.
                let bar_size_aligned = (bar_size as usize + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

                // Safety:
                // `aspace` is `vmm_get_kernel_aspace()`.
                // `name` is a `&'static CStr`.
                // `bar_paddr` and `bar_size_aligned` are safe by this function's safety requirements.
                let ret = unsafe {
                    vmm_alloc_physical(
                        vmm_get_kernel_aspace(),
                        c"pci_config_space".as_ptr(),
                        bar_size_aligned,
                        &bar_vaddr,
                        0,
                        bar_paddr as usize,
                        0,
                        // Device MMIO: map non-executable and uncached.
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_UNCACHED_DEVICE,
                    )
                };
                LkError::from_lk(ret)?;

                // Safety:
                // `bar_paddr` and `bar_size_aligned` are safe by this function's safety requirements.
                match unsafe { mmio_map_region(bar_paddr as usize, bar_size_aligned) } {
                    // Ignore not supported which implies that guard is not used.
                    // NOTE(review): ERR_INVALID_ARGS is also tolerated here but is
                    // not covered by the comment above — confirm this is intentional
                    // and not masking a genuine mapping failure.
                    Ok(()) | Err(LkError::ERR_NOT_SUPPORTED) | Err(LkError::ERR_INVALID_ARGS) => {}
                    Err(err) => {
                        log::error!("mmio_map_region returned unexpected error: {:?}", err);
                        return Err(Error::Lk(err));
                    }
                }

                // Record the mapping so `mmio_phys_to_virt` can find it later.
                BARS.lock().deref_mut()[bar] = Some(BarInfo {
                    paddr: bar_paddr as usize,
                    size: bar_size_aligned,
                    vaddr: bar_vaddr as usize,
                });
            }
        }
        Ok(())
    }
}
118 
// Safety: TrustyHal is stateless and thus trivially safe to send to another thread
// NOTE(review): `TrustyHal` is a unit struct, so `Send` should already be
// auto-implemented; this explicit impl looks redundant — confirm before removing.
unsafe impl Send for TrustyHal {}
121 
// Safety: See function specific comments
unsafe impl Hal for TrustyHal {
    // Allocates `pages` pages of contiguous DMA memory and shares them
    // with the device via the arch-specific hook before returning.
    //
    // Safety:
    // Function either returns a non-null, properly aligned pointer or panics the kernel.
    // The call to `vmm_alloc_contiguous` ensures that the pointed to memory is zeroed.
    fn dma_alloc(pages: usize, direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        let size = pages * PAGE_SIZE;
        let (paddr, vaddr) = crate::hal::dma_alloc(pages, direction);
        // Share the freshly allocated range before handing it to the caller.
        arch::dma_alloc_share(paddr, size);
        (paddr, vaddr)
    }

    // Unshares and then frees memory previously returned by `dma_alloc`.
    //
    // Safety: `vaddr` was returned by `dma_alloc` and hasn't been deallocated.
    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32 {
        let size = pages * PAGE_SIZE;
        // Revoke sharing before the pages go back to the allocator.
        arch::dma_dealloc_unshare(paddr, size);

        // Safety:
        // `vaddr` was returned by `dma_alloc` and hasn't been deallocated.
        unsafe { crate::hal::dma_dealloc(paddr, vaddr, pages) }
    }

    // Translates a physical address inside a mapped BAR to the kernel
    // virtual address recorded by `TrustyHal::mmio_alloc`.
    //
    // Only used for MMIO addresses within BARs read from the device,
    // for the PCI transport.
    //
    // Safety: `paddr` and `size` are validated against allocations made in
    // `Self::mmio_alloc`; panics on validation failure.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        for bar in BARS.lock().deref().iter().flatten() {
            let bar_paddr_end = bar.paddr + bar.size;
            if (bar.paddr..bar_paddr_end).contains(&paddr) {
                // check that the address range up to the given size is within
                // the region expected for MMIO.
                if paddr + size > bar_paddr_end {
                    panic!("invalid arguments passed to mmio_phys_to_virt");
                }
                let offset = paddr - bar.paddr;

                let bar_vaddr_ptr: *mut u8 = bar.vaddr as _;
                // Safety:
                // - `BARS` correctly maps from physical to virtual pages
                // - `offset` is less than or equal to bar.size because
                //   `bar.paddr` <= `paddr` < `bar_paddr_end`
                let vaddr = unsafe { bar_vaddr_ptr.add(offset) };
                return NonNull::<u8>::new(vaddr).unwrap();
            }
        }

        // `paddr` did not fall inside any BAR mapped by `mmio_alloc`.
        panic!("error mapping physical memory to virtual for mmio");
    }

    // Shares `buffer` with the device for a DMA transfer.
    //
    // Safety: delegated to callee
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
        // Safety: delegated to arch::share
        unsafe { arch::share(buffer, direction) }
    }

    // Revokes device access to `buffer` after a DMA transfer completes.
    //
    // Safety: delegated to callee
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
        // Safety: delegated to arch::unshare
        unsafe {
            arch::unshare(paddr, buffer, direction);
        }
    }
}
187