• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #[cfg(test)]
2 pub mod fake;
3 
4 use crate::{Error, Result, PAGE_SIZE};
5 use core::{marker::PhantomData, ptr::NonNull};
6 
/// A physical address as used for virtio.
///
/// Represented as a plain `usize`; it is the address form that devices use to access shared
/// memory (see [`Hal::dma_alloc`] and [`Hal::share`]).
pub type PhysAddr = usize;
9 
/// Common interface to a region of physically contiguous DMA memory, exposing its physical
/// address (for devices) and virtual pointers (for the driver or device code).
pub trait DmaMemory {
    /// Returns the physical address of the start of the DMA region, as seen by devices.
    fn paddr(&self) -> usize;
    /// Returns a pointer to the given offset within the DMA region.
    fn vaddr(&self, offset: usize) -> NonNull<u8>;
    /// Returns a pointer to the entire DMA region as a slice.
    fn raw_slice(&self) -> NonNull<[u8]>;
}
15 
/// A region of contiguous physical memory used for DMA.
#[derive(Debug)]
pub struct Dma<H: Hal> {
    // Physical address of the start of the region, as seen by devices.
    paddr: usize,
    // Driver-visible pointer to the start of the region, as returned by `Hal::dma_alloc`.
    vaddr: NonNull<u8>,
    // Length of the region in [`PAGE_SIZE`] pages.
    pages: usize,
    // Ties the region to the `Hal` implementation that allocated it, so `Drop` deallocates
    // through the same HAL.
    _hal: PhantomData<H>,
}
24 
// SAFETY: DMA memory can be accessed from any thread; `Dma` only stores a physical address and a
// raw pointer, neither of which is tied to the allocating thread.
unsafe impl<H: Hal> Send for Dma<H> {}

// SAFETY: `&Dma` only allows pointers and physical addresses to be returned. Any actual access to
// the memory requires unsafe code, which is responsible for avoiding data races.
unsafe impl<H: Hal> Sync for Dma<H> {}
31 
32 impl<H: Hal> Dma<H> {
33     /// Allocates the given number of pages of physically contiguous memory to be used for DMA in
34     /// the given direction.
35     ///
36     /// The pages will be zeroed.
new(pages: usize, direction: BufferDirection) -> Result<Self>37     pub fn new(pages: usize, direction: BufferDirection) -> Result<Self> {
38         let (paddr, vaddr) = H::dma_alloc(pages, direction);
39         if paddr == 0 {
40             return Err(Error::DmaError);
41         }
42         Ok(Self {
43             paddr,
44             vaddr,
45             pages,
46             _hal: PhantomData,
47         })
48     }
49 }
50 
51 impl<H: Hal> DmaMemory for Dma<H> {
52     /// Returns the physical address of the start of the DMA region, as seen by devices.
paddr(&self) -> usize53     fn paddr(&self) -> usize {
54         self.paddr
55     }
56 
57     /// Returns a pointer to the given offset within the DMA region.
vaddr(&self, offset: usize) -> NonNull<u8>58     fn vaddr(&self, offset: usize) -> NonNull<u8> {
59         assert!(offset < self.pages * PAGE_SIZE);
60         NonNull::new((self.vaddr.as_ptr() as usize + offset) as _).unwrap()
61     }
62 
63     /// Returns a pointer to the entire DMA region as a slice.
raw_slice(&self) -> NonNull<[u8]>64     fn raw_slice(&self) -> NonNull<[u8]> {
65         let raw_slice =
66             core::ptr::slice_from_raw_parts_mut(self.vaddr(0).as_ptr(), self.pages * PAGE_SIZE);
67         NonNull::new(raw_slice).unwrap()
68     }
69 }
70 
impl<H: Hal> Drop for Dma<H> {
    fn drop(&mut self) {
        // SAFETY: The memory was previously allocated by `dma_alloc` in `Dma::new`, not yet
        // deallocated, and we are passing the values from then.
        let err = unsafe { H::dma_dealloc(self.paddr, self.vaddr, self.pages) };
        assert_eq!(err, 0, "failed to deallocate DMA");
    }
}
79 
/// A region of driver-shared physical memory mapped in for device-side DMA access.
#[derive(Debug)]
pub struct DeviceDma<H: DeviceHal> {
    // Physical address of the start of the region.
    paddr: usize,
    // Device-visible pointer to the mapped region, as returned by `DeviceHal::dma_map`.
    vaddr: NonNull<u8>,
    // Length of the region in [`PAGE_SIZE`] pages.
    pages: usize,
    // Ties the mapping to the `DeviceHal` implementation that created it, so `Drop` unmaps
    // through the same HAL.
    _hal: PhantomData<H>,
    // Opaque client identifier forwarded to `DeviceHal::dma_map` — presumably identifies the
    // driver sharing the memory; confirm against the `DeviceHal` implementation.
    client_id: u16,
}
88 
// SAFETY: Device DMA memory can be accessed from any thread.
unsafe impl<H: DeviceHal> Send for DeviceDma<H> {}

// SAFETY: `&DeviceDma` only allows pointers and physical addresses to be returned. Any access to
// the memory requires unsafe code, which is responsible for avoiding data races.
unsafe impl<H: DeviceHal> Sync for DeviceDma<H> {}
95 
96 impl<H: DeviceHal> DeviceDma<H> {
97     // SAFETY: The caller must ensure that the memory described by paddr and pages can be mapped by
98     // the type implementing DeviceHal such as a virtqueue or a buffer described by a descriptor.
new( paddr: PhysAddr, pages: usize, direction: BufferDirection, client_id: u16, ) -> Result<Self>99     pub unsafe fn new(
100         paddr: PhysAddr,
101         pages: usize,
102         direction: BufferDirection,
103         client_id: u16,
104     ) -> Result<Self> {
105         let vaddr = H::dma_map(paddr, pages, direction, client_id)?;
106         Ok(Self {
107             paddr,
108             vaddr,
109             pages,
110             _hal: PhantomData,
111             client_id,
112         })
113     }
114 }
115 
116 impl<H: DeviceHal> DmaMemory for DeviceDma<H> {
117     /// Returns the physical address of the start of the DMA region, as seen by devices.
paddr(&self) -> usize118     fn paddr(&self) -> usize {
119         self.paddr
120     }
121 
122     /// Returns a pointer to the given offset within the DMA region.
vaddr(&self, offset: usize) -> NonNull<u8>123     fn vaddr(&self, offset: usize) -> NonNull<u8> {
124         assert!(offset < self.pages * PAGE_SIZE);
125         NonNull::new((self.vaddr.as_ptr() as usize + offset) as _).unwrap()
126     }
127 
128     /// Returns a pointer to the entire DMA region as a slice.
raw_slice(&self) -> NonNull<[u8]>129     fn raw_slice(&self) -> NonNull<[u8]> {
130         let raw_slice =
131             core::ptr::slice_from_raw_parts_mut(self.vaddr(0).as_ptr(), self.pages * PAGE_SIZE);
132         NonNull::new(raw_slice).unwrap()
133     }
134 }
135 
136 impl<H: DeviceHal> Drop for DeviceDma<H> {
drop(&mut self)137     fn drop(&mut self) {
138         // SAFETY: DeviceDma::new ensures that paddr, vaddr and pages were passed to
139         // DeviceHal::dma_map for this instance of DeviceDma
140         let err = unsafe { H::dma_unmap(self.paddr, self.vaddr, self.pages) };
141         assert_eq!(err, 0, "failed to unmap DMA");
142     }
143 }
144 
/// The interface which a particular hardware implementation must implement.
///
/// # Safety
///
/// Implementations of this trait must follow the "implementation safety" requirements documented
/// for each method. Callers must follow the safety requirements documented for the unsafe methods.
pub unsafe trait Hal {
    /// Allocates and zeroes the given number of contiguous physical pages of DMA memory for VirtIO
    /// use.
    ///
    /// Returns both the physical address which the device can use to access the memory, and a
    /// pointer to the start of it which the driver can use to access it.
    ///
    /// # Implementation safety
    ///
    /// Implementations of this method must ensure that the `NonNull<u8>` returned is a
    /// [_valid_](https://doc.rust-lang.org/std/ptr/index.html#safety) pointer, aligned to
    /// [`PAGE_SIZE`], and won't alias any other allocations or references in the program until it
    /// is deallocated by `dma_dealloc`. The pages must be zeroed.
    fn dma_alloc(pages: usize, direction: BufferDirection) -> (PhysAddr, NonNull<u8>);

    /// Deallocates the given contiguous physical DMA memory pages.
    ///
    /// Returns 0 on success; a non-zero value indicates failure.
    ///
    /// # Safety
    ///
    /// The memory must have been allocated by `dma_alloc` on the same `Hal` implementation, and not
    /// yet deallocated. `pages` must be the same number passed to `dma_alloc` originally, and both
    /// `paddr` and `vaddr` must be the values returned by `dma_alloc`.
    unsafe fn dma_dealloc(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32;

    /// Converts a physical address used for MMIO to a virtual address which the driver can access.
    ///
    /// This is only used for MMIO addresses within BARs read from the device, for the PCI
    /// transport. It may check that the address range up to the given size is within the region
    /// expected for MMIO.
    ///
    /// # Implementation safety
    ///
    /// Implementations of this method must ensure that the `NonNull<u8>` returned is a
    /// [_valid_](https://doc.rust-lang.org/std/ptr/index.html#safety) pointer, and won't alias any
    /// other allocations or references in the program.
    ///
    /// # Safety
    ///
    /// The `paddr` and `size` must describe a valid MMIO region. The implementation may validate it
    /// in some way (and panic if it is invalid) but is not guaranteed to.
    unsafe fn mmio_phys_to_virt(paddr: PhysAddr, size: usize) -> NonNull<u8>;

    /// Shares the given memory range with the device, and returns the physical address that the
    /// device can use to access it.
    ///
    /// This may involve mapping the buffer into an IOMMU, giving the host permission to access the
    /// memory, or copying it to a special region where it can be accessed.
    ///
    /// # Safety
    ///
    /// The buffer must be a valid pointer to a non-empty memory range which will not be accessed by
    /// any other thread for the duration of this method call.
    unsafe fn share(buffer: NonNull<[u8]>, direction: BufferDirection) -> PhysAddr;

    /// Unshares the given memory range from the device and (if necessary) copies it back to the
    /// original buffer.
    ///
    /// # Safety
    ///
    /// The buffer must be a valid pointer to a non-empty memory range which will not be accessed by
    /// any other thread for the duration of this method call. The `paddr` must be the value
    /// previously returned by the corresponding `share` call.
    unsafe fn unshare(paddr: PhysAddr, buffer: NonNull<[u8]>, direction: BufferDirection);
}
215 
/// Device-side abstraction layer for mapping and unmapping memory shared by the driver.
///
/// NOTE(review): unlike [`Hal`], this trait is not declared `unsafe` even though it imposes
/// "implementation safety" requirements on implementers — consider making it an `unsafe trait`
/// so implementers must explicitly vouch for those requirements. (Left unchanged here because it
/// would break existing implementors.)
///
/// # Safety
///
/// Implementations of this trait must follow the "implementation safety" requirements documented
/// for each method. Callers must follow the safety requirements documented for the unsafe methods.
pub trait DeviceHal {
    /// Maps in memory for a range of physical addresses shared by a VirtIO driver.
    ///
    /// Returns the virtual address which the device should use to access it. `client_id`
    /// identifies the client sharing the memory — TODO confirm exact semantics against the
    /// implementations.
    ///
    /// # Implementation safety
    ///
    /// Implementations of this method must ensure that the `NonNull<u8>` returned is a
    /// [_valid_](https://doc.rust-lang.org/std/ptr/index.html#safety) pointer, aligned to
    /// [`PAGE_SIZE`], and won't alias any other allocations or references in the program until it
    /// is freed by `dma_unmap`.
    ///
    /// # Safety
    ///
    /// The caller must ensure that the memory described by `paddr` and `pages` can be mapped by
    /// the implementation, such as a virtqueue or a buffer described by a descriptor.
    unsafe fn dma_map(
        paddr: PhysAddr,
        pages: usize,
        direction: BufferDirection,
        client_id: u16,
    ) -> Result<NonNull<u8>>;

    /// Unmaps memory previously shared by the driver.
    ///
    /// Returns 0 on success; a non-zero value indicates failure.
    ///
    /// # Safety
    ///
    /// The memory must have been mapped in by `dma_map` on the same `DeviceHal` implementation, and
    /// not yet unmapped. `pages` must be the same number passed to `dma_map` originally, and
    /// both `paddr` and `vaddr` must be the values returned by `dma_map`.
    unsafe fn dma_unmap(paddr: PhysAddr, vaddr: NonNull<u8>, pages: usize) -> i32;
}
248 
/// The direction in which a buffer is passed.
///
/// This determines which side (driver or device) is allowed to write to the buffer while it is
/// shared; it is passed to [`Hal::dma_alloc`], [`Hal::share`] and [`Hal::unshare`].
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum BufferDirection {
    /// The buffer may be read or written by the driver, but only read by the device.
    DriverToDevice,
    /// The buffer may be read or written by the device, but only read by the driver.
    DeviceToDriver,
    /// The buffer may be read or written by both the device and the driver.
    Both,
}
259