/*
 * Copyright (c) 2024 Google Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

use core::ops::Deref;
use core::ops::DerefMut;
use core::ptr::NonNull;

use lazy_static::lazy_static;

use rust_support::mmu::ARCH_MMU_FLAG_PERM_NO_EXECUTE;
use rust_support::mmu::ARCH_MMU_FLAG_UNCACHED_DEVICE;
use rust_support::mmu::PAGE_SIZE_SHIFT;
use rust_support::paddr_t;
use rust_support::sync::Mutex;
use rust_support::vaddr_t;
use rust_support::vmm::vaddr_to_paddr;
use rust_support::vmm::vmm_alloc_contiguous;
use rust_support::vmm::vmm_alloc_physical;
use rust_support::vmm::vmm_free_region;
use rust_support::vmm::vmm_get_kernel_aspace;

use static_assertions::const_assert_eq;

use virtio_drivers::transport::pci::bus::DeviceFunction;
use virtio_drivers::transport::pci::bus::PciRoot;
use virtio_drivers::{BufferDirection, Hal, PhysAddr, PAGE_SIZE};

use crate::err::Error;

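/// Mapping of a single PCI BAR: the BAR's physical address, the page-aligned
/// size that was mapped, and the kernel virtual address it was mapped at by
/// `TrustyHal::mmio_alloc`.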
#[derive(Copy, Clone)]
struct BarInfo {
    paddr: paddr_t,
    size: usize,
    vaddr: vaddr_t,
}

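// A PCI type 0 configuration header exposes up to 6 BARs; `BARS` records the
// mapping created by `mmio_alloc` for each memory BAR, indexed by BAR number,
// so that `mmio_phys_to_virt` can translate device physical addresses later.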
const NUM_BARS: usize = 6;
lazy_static! {
    static ref BARS: Mutex<[Option<BarInfo>; NUM_BARS]> = Mutex::new([None; NUM_BARS]);
}

// virtio-drivers requires 4k pages; check that we meet that requirement.
const_assert_eq!(PAGE_SIZE, rust_support::mmu::PAGE_SIZE as usize);

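/// `virtio_drivers::Hal` implementation for Trusty: DMA buffers are backed by
/// contiguous kernel memory from the vmm, and MMIO physical addresses are
/// translated through the BAR mappings recorded in `BARS`.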
pub struct TrustyHal;

impl TrustyHal {
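    /// Maps each memory BAR of `device_function` into the kernel address
    /// space as uncached, non-executable device memory and records the
    /// mapping in `BARS` for later use by `mmio_phys_to_virt`.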
    pub fn mmio_alloc(
        pci_root: &mut PciRoot,
        device_function: DeviceFunction,
    ) -> Result<(), Error> {
        for bar in 0..NUM_BARS {
            let bar_info = pci_root.bar_info(device_function, bar as u8).unwrap();
            if let Some((bar_paddr, bar_size)) = bar_info.memory_address_size() {
                let mut bar_vaddr = core::ptr::null_mut();
                let bar_size_aligned = (bar_size as usize + PAGE_SIZE - 1) & !(PAGE_SIZE - 1);

                // # Safety
                // `aspace` is `vmm_get_kernel_aspace()`.
                // `name` is a `&'static CStr`.
                // `bar_paddr` and `bar_size_aligned` describe the device's BAR
                // as reported by `pci_root.bar_info()`.
                let ret = unsafe {
                    vmm_alloc_physical(
                        vmm_get_kernel_aspace(),
                        c"pci_config_space".as_ptr(),
                        bar_size_aligned,
                        &mut bar_vaddr,
                        0,
                        bar_paddr as usize,
                        0,
                        ARCH_MMU_FLAG_PERM_NO_EXECUTE | ARCH_MMU_FLAG_UNCACHED_DEVICE,
                    )
                };
                rust_support::Error::from_lk(ret)?;

                BARS.lock().deref_mut()[bar] = Some(BarInfo {
                    paddr: bar_paddr as usize,
                    size: bar_size_aligned,
                    vaddr: bar_vaddr as usize,
                });
            }
        }
        Ok(())
    }
}


unsafe impl Hal for TrustyHal {
    // Safety:
    // Function either returns a non-null, properly aligned pointer or panics the kernel.
    // The call to `vmm_alloc_contiguous` ensures that the pointed-to memory is zeroed.
    fn dma_alloc(pages: usize, _direction: BufferDirection) -> (PhysAddr, NonNull<u8>) {
        let name = c"vsock-rust";
        // dma_alloc receives a number of pages, but vmm_alloc_contiguous expects bytes.
        let size = pages * PAGE_SIZE;
        let mut vaddr = core::ptr::null_mut(); // stores pointer to virtual memory
        let align_pow2 = PAGE_SIZE_SHIFT as u8;
        let vmm_flags = 0;
        let arch_mmu_flags = 0;
        let aspace = vmm_get_kernel_aspace();

        // NOTE: the allocated memory will be zeroed since vmm_alloc_contiguous
        // calls vmm_alloc_pmm, which does not set the PMM_ALLOC_FLAG_NO_CLEAR
        // flag.
        //
        // # Safety
        // `aspace` is `vmm_get_kernel_aspace()`.
        // `name` is a `&'static CStr`.
        // `size` is validated by the callee.
        let rc = unsafe {
            vmm_alloc_contiguous(
                aspace,
                name.as_ptr(),
                size,
                &mut vaddr,
                align_pow2,
                vmm_flags,
                arch_mmu_flags,
            )
        };
        if rc != 0 {
            panic!("error {} allocating physical memory", rc);
        }
        if vaddr as usize & (PAGE_SIZE - 1) != 0 {
            panic!("error page-aligning allocation {:#x}", vaddr as usize);
        }

        // Safety: `vaddr` is valid because the call to `vmm_alloc_contiguous` succeeded.
        let paddr = unsafe { vaddr_to_paddr(vaddr) };

        (paddr, NonNull::<u8>::new(vaddr as *mut u8).unwrap())
    }

    unsafe fn dma_dealloc(_paddr: PhysAddr, vaddr: NonNull<u8>, _pages: usize) -> i32 {
        // TODO: store pointers allocated with dma_alloc to validate the args
        let aspace = vmm_get_kernel_aspace();
        vmm_free_region(aspace, vaddr.as_ptr() as _)
    }

    // Only used for MMIO addresses within BARs read from the device,
    // for the PCI transport.
    //
    // Safety: `paddr` and `size` are validated against allocations made in
    // `Self::mmio_alloc`; panics on validation failure.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8> {
        for bar in BARS.lock().deref().iter().flatten() {
            let bar_paddr_end = bar.paddr + bar.size;
            if (bar.paddr..bar_paddr_end).contains(&paddr) {
                // Check that the address range up to the given size is within
                // the region expected for MMIO.
                if paddr + size > bar_paddr_end {
                    panic!("invalid arguments passed to mmio_phys_to_virt");
                }
                let offset: isize = (paddr - bar.paddr).try_into().unwrap();

                let bar_vaddr_ptr: *mut u8 = bar.vaddr as _;
                return NonNull::<u8>::new(bar_vaddr_ptr.offset(offset)).unwrap();
            }
        }

        panic!("error mapping physical memory to virtual for mmio");
    }

    unsafe fn share(buffer: NonNull<[u8]>, _direction: BufferDirection) -> PhysAddr {
        // no-op on x86_64, not implemented on other architectures
        #[cfg(not(target_arch = "x86_64"))]
        unimplemented!();

        vaddr_to_paddr(buffer.as_ptr().cast())
    }

    // Safety: no-op on x86_64, panics elsewhere.
    unsafe fn unshare(_paddr: PhysAddr, _buffer: NonNull<[u8]>, _direction: BufferDirection) {
        // no-op on x86_64, not implemented on other architectures
        #[cfg(not(target_arch = "x86_64"))]
        unimplemented!();
    }
}
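
// Usage sketch (assumed; not part of this file): during PCI enumeration the
// vsock driver is expected to map the device's BARs through this HAL before
// constructing the virtio-drivers PCI transport, roughly:
//
//     TrustyHal::mmio_alloc(&mut pci_root, device_function)?;
//     let transport = PciTransport::new::<TrustyHal>(&mut pci_root, device_function)?;
//
// where `pci_root` and `device_function` come from bus enumeration; the exact
// `PciTransport` constructor signature depends on the virtio-drivers version.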