1 /*
2 * Copyright (c) 2024 Google Inc. All rights reserved
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining
5 * a copy of this software and associated documentation files
6 * (the "Software"), to deal in the Software without restriction,
7 * including without limitation the rights to use, copy, modify, merge,
8 * publish, distribute, sublicense, and/or sell copies of the Software,
9 * and to permit persons to whom the Software is furnished to do so,
10 * subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22 */
23
24 use alloc::collections::btree_map::BTreeMap;
25
26 use lazy_static::lazy_static;
27
28 use core::ffi::c_void;
29 use core::ops::DerefMut;
30 use core::ptr::copy_nonoverlapping;
31 use core::ptr::NonNull;
32
33 use hypervisor::share_pages;
34 use hypervisor::unshare_pages;
35
36 use crate::pci::hal::TrustyHal;
37
38 use rust_support::paddr_t;
39 use rust_support::sync::Mutex;
40 use rust_support::vaddr_t;
41
42 use static_assertions::assert_cfg;
43
44 use virtio_drivers_and_devices::BufferDirection;
45 use virtio_drivers_and_devices::Hal;
46 use virtio_drivers_and_devices::PhysAddr;
47 use virtio_drivers_and_devices::PAGE_SIZE;
48
49 // This code will only work on x86_64 or aarch64
50 assert_cfg!(any(target_arch = "x86_64", target_arch = "aarch64"), "Must target x86_64 or aarch64");
51
lazy_static! {
    /// Stores the paddr to vaddr mapping in `share` for use in `unshare`.
    ///
    /// Keyed by the physical address of each shared bounce buffer; the value
    /// is the kernel virtual address returned by `TrustyHal::dma_alloc`, kept
    /// so `unshare` can copy data back and free the allocation.
    static ref VADDRS: Mutex<BTreeMap<paddr_t, vaddr_t>> = Mutex::new(BTreeMap::new());
}
56
57 /// Perform architecture-specific DMA allocation
dma_alloc_share(paddr: usize, size: usize)58 pub(crate) fn dma_alloc_share(paddr: usize, size: usize) {
59 share_pages(paddr, size).expect("failed to share pages");
60 }
61
62 /// Perform architecture-specific DMA deallocation
dma_dealloc_unshare(paddr: PhysAddr, size: usize)63 pub(crate) fn dma_dealloc_unshare(paddr: PhysAddr, size: usize) {
64 unshare_pages(paddr, size).expect("failed to unshare pages");
65 }
66
/// Allocates a host-shared DMA bounce buffer for `buffer`, copies the
/// caller's data into it when the device will read it, and returns the
/// bounce buffer's physical address.
///
/// The paddr -> vaddr mapping is recorded in `VADDRS` so that `unshare` can
/// later locate, copy back from, and free the bounce buffer.
///
/// # Safety
///
/// `buffer` must be a valid kernel virtual address that is not already mapped
/// for DMA.
pub(crate) unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr {
    let size = buffer.len();
    let pages = to_pages(size);

    // Allocate a page-granular region already shared with the host.
    let (paddr, vaddr) = TrustyHal::dma_alloc(pages, direction);
    // Record the mapping for `unshare`. A pre-existing entry would mean the
    // same physical region was shared twice, which violates this module's
    // invariants, so treat it as a fatal bug.
    if let Some(old_vaddr) = VADDRS.lock().deref_mut().insert(paddr, vaddr.as_ptr() as usize) {
        panic!("paddr ({:#x}) was already mapped to vaddr ({:#x})", paddr, old_vaddr);
    }

    let dst_ptr = vaddr.as_ptr() as *mut c_void;

    // Copy the caller's data into the bounce buffer unless the transfer is
    // device-to-driver only, in which case the device will overwrite it.
    if direction != BufferDirection::DeviceToDriver {
        let src_ptr = buffer.as_ptr() as *const u8 as *const c_void;
        // Safety: Both regions are valid, properly aligned, and don't overlap.
        // - Because `vaddr` is a virtual address returned by `dma_alloc`, it is
        //   properly aligned and does not overlap with `buffer`.
        // - There are no particular alignment requirements on `buffer`.
        unsafe { copy_nonoverlapping(src_ptr, dst_ptr, size) };
    }

    paddr
}
90
/// Copies the shared bounce buffer for `paddr` back into `buffer` (when the
/// device may have written to it) and then unshares and frees the bounce
/// buffer.
///
/// # Safety
///
/// - `paddr` is a valid physical address returned by call to `share`.
/// - `buffer` must be a valid kernel virtual address previously passed to
///   `share` that has not already been `unshare`d by this function.
pub(crate) unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection) {
    let size = buffer.len();
    // Removing (not just reading) the entry also makes a double-unshare of
    // the same paddr fail loudly on the `expect` below.
    let vaddr = VADDRS.lock().deref_mut().remove(&paddr).expect("paddr was inserted by share")
        as *const c_void;

    // Copy the device-visible contents back to the caller's buffer unless the
    // transfer was driver-to-device only, in which case nothing changed.
    if direction != BufferDirection::DriverToDevice {
        let dest = buffer.as_ptr() as *mut u8 as *mut c_void;
        // Safety: Both regions are valid, properly aligned, and don't overlap.
        // - Because `vaddr` was retrieved from `VADDRS`, it must have been returned
        //   from the call to `dma_alloc` in `share`.
        // - Because `vaddr` is a virtual address returned by `dma_alloc`, it is
        //   properly aligned and does not overlap with `buffer`.
        // - There are no particular alignment requirements on `buffer`.
        unsafe { copy_nonoverlapping(vaddr, dest, size) };
    }

    // `vaddr` came from `dma_alloc`, so it is non-null; `unwrap` cannot fail.
    let vaddr = NonNull::<u8>::new(vaddr as *mut u8).unwrap();
    // Safety: memory was allocated by `share` and not previously `unshare`d.
    unsafe {
        TrustyHal::dma_dealloc(paddr, vaddr, to_pages(size));
    }
}
117
to_pages(size: usize) -> usize118 fn to_pages(size: usize) -> usize {
119 size.div_ceil(PAGE_SIZE)
120 }
121