// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Provides utilities for communicating with an iommu in another process.

use std::sync::Arc;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::Protection;
use base::RawDescriptor;
use base::Tube;
use serde::Deserialize;
use serde::Serialize;
use smallvec::SmallVec;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;
use zerocopy::FromBytes;

use crate::virtio::memory_mapper::MemRegion;

#[derive(Serialize, Deserialize)]
pub(super) enum IommuRequest {
    Export {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    Release {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    StartExportSession {
        endpoint_id: u32,
    },
}

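/// Replies to `IommuRequest`. On success, the response variant mirrors the
/// request variant; `Err` carries an error message from the remote iommu.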
#[derive(Serialize, Deserialize)]
pub(super) enum IommuResponse {
    Export(Vec<MemRegion>),
    Release,
    StartExportSession(Event),
    Err(String),
}

impl IommuRequest {
    pub(super) fn get_endpoint_id(&self) -> u32 {
        match self {
            Self::Export { endpoint_id, .. } => *endpoint_id,
            Self::Release { endpoint_id, .. } => *endpoint_id,
            Self::StartExportSession { endpoint_id } => *endpoint_id,
        }
    }
}

/// Sends address translation requests to another process over a `Tube`, and
/// receives the translated addresses over another `Tube`.
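///
/// A minimal usage sketch, assuming the tubes are already connected to a
/// remote iommu (marked `ignore` because it cannot run standalone):
///
/// ```ignore
/// let mut mapper = IpcMemoryMapper::new(request_tx, response_rx, endpoint_id);
/// let regions = mapper.export(iova, size)?;
/// // ... access the exported regions ...
/// mapper.release(iova, size)?;
/// ```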
pub struct IpcMemoryMapper {
    request_tx: Tube,
    response_rx: Tube,
    endpoint_id: u32,
}

fn map_bad_resp(resp: IommuResponse) -> anyhow::Error {
    match resp {
        IommuResponse::Err(e) => anyhow!("remote error {}", e),
        _ => anyhow!("response type mismatch"),
    }
}

impl IpcMemoryMapper {
    /// Returns a new `IpcMemoryMapper` instance.
    ///
    /// # Arguments
    ///
    /// * `request_tx` - A tube to send `IommuRequest`s to another process.
    /// * `response_rx` - A tube to receive `IommuResponse`s.
    /// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
    pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
        Self {
            request_tx,
            response_rx,
            endpoint_id,
        }
    }

    fn do_request(&self, req: IommuRequest) -> Result<IommuResponse> {
        self.request_tx
            .send(&req)
            .context("failed to send request")?;
        self.response_rx
            .recv::<IommuResponse>()
            .context("failed to get response")
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::export].
    pub fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
        let req = IommuRequest::Export {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Export(vec) => Ok(vec),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::release].
    pub fn release(&mut self, iova: u64, size: u64) -> Result<()> {
        let req = IommuRequest::Release {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Release => Ok(()),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::start_export_session].
    pub fn start_export_session(&mut self) -> Result<Event> {
        let req = IommuRequest::StartExportSession {
            endpoint_id: self.endpoint_id,
        };
        match self.do_request(req)? {
            IommuResponse::StartExportSession(evt) => Ok(evt),
            e => Err(map_bad_resp(e)),
        }
    }
}

impl AsRawDescriptors for IpcMemoryMapper {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        vec![
            self.request_tx.as_raw_descriptor(),
            self.response_rx.as_raw_descriptor(),
        ]
    }
}

pub struct CreateIpcMapperRet {
    pub mapper: IpcMemoryMapper,
    pub response_tx: Tube,
}

/// Returns a new `IpcMemoryMapper` instance and a `response_tx` for the iommu
/// to respond to `IommuRequest`s.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube to send `IommuRequest`s to a remote iommu. This
///                  should be cloned and shared between different ipc mappers
///                  with different `endpoint_id`s.
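///
/// A hedged sketch of the wiring (the remote iommu setup is assumed; marked
/// `ignore` because it cannot run standalone):
///
/// ```ignore
/// let CreateIpcMapperRet { mapper, response_tx } = create_ipc_mapper(endpoint_id, request_tx);
/// // Hand `response_tx` to the remote iommu, which replies on it for
/// // requests tagged with `endpoint_id`.
/// ```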
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
    let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
    CreateIpcMapperRet {
        mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
        response_tx,
    }
}

struct ExportedRegionInner {
    regions: Vec<MemRegion>,
    iova: u64,
    size: u64,
    iommu: Arc<Mutex<IpcMemoryMapper>>,
}

impl Drop for ExportedRegionInner {
    fn drop(&mut self) {
        if let Err(e) = self.iommu.lock().release(self.iova, self.size) {
            error!("Error releasing region {:?}", e);
        }
    }
}

/// A region exported from the virtio-iommu.
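///
/// A minimal lifecycle sketch, assuming `mem`, `iommu`, `iova`, and `size`
/// come from the surrounding device (marked `ignore` because it needs a live
/// remote iommu):
///
/// ```ignore
/// let region = ExportedRegion::new(&mem, iommu.clone(), iova, size)?;
/// let val: u32 = region.read_obj_from_addr(&mem, iova)?;
/// region.write_obj_at_addr(&mem, val, iova)?;
/// // Dropping the last clone releases [iova, iova + size) back to the iommu.
/// ```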
#[derive(Clone)]
pub struct ExportedRegion {
    inner: Arc<Mutex<ExportedRegionInner>>,
}

impl ExportedRegion {
    /// Creates a new, fully initialized exported region.
    pub fn new(
        mem: &GuestMemory,
        iommu: Arc<Mutex<IpcMemoryMapper>>,
        iova: u64,
        size: u64,
    ) -> Result<Self> {
        let regions = iommu
            .lock()
            .export(iova, size)
            .context("failed to export")?;
        for r in &regions {
            if !mem.is_valid_range(r.gpa, r.len) {
                bail!("region not in memory range");
            }
        }
        Ok(Self {
            inner: Arc::new(Mutex::new(ExportedRegionInner {
                regions,
                iova,
                size,
                iommu,
            })),
        })
    }

    // Helper function for copying to/from [iova, iova+remaining).
    fn do_copy<C>(
        &self,
        iova: u64,
        mut remaining: usize,
        prot: Protection,
        mut copy_fn: C,
    ) -> Result<()>
    where
        C: FnMut(usize /* offset */, GuestAddress, usize /* len */) -> Result<usize>,
    {
        let inner = self.inner.lock();
        let mut region_offset = iova.checked_sub(inner.iova).with_context(|| {
            format!(
                "out of bounds: src_iova={} region_iova={}",
                iova, inner.iova
            )
        })?;
        let mut offset = 0;
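        // Walk the exported regions in order. `region_offset` is the distance
        // from the start of the current region to the first byte to copy; it
        // is non-zero only until the first region to copy is reached.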
        for r in &inner.regions {
            if region_offset >= r.len {
                region_offset -= r.len;
                continue;
            }

            if !r.prot.allows(&prot) {
                bail!("gpa is not accessible");
            }

            // Clamp to the bytes remaining in this region past `region_offset`,
            // so the copy cannot run off the end of the region.
            let len = ((r.len - region_offset) as usize).min(remaining);
            let copy_len = copy_fn(offset, r.gpa.unchecked_add(region_offset), len)?;
            if len != copy_len {
                bail!("incomplete copy: expected={}, actual={}", len, copy_len);
            }

            remaining -= len;
            offset += len;
            region_offset = 0;

            if remaining == 0 {
                return Ok(());
            }
        }

        Err(anyhow!("not enough data: remaining={}", remaining))
    }

    /// Reads an object from the given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't readable.
    pub fn read_obj_from_addr<T: FromBytes>(
        &self,
        mem: &GuestMemory,
        iova: u64,
    ) -> anyhow::Result<T> {
        let mut buf = vec![0u8; std::mem::size_of::<T>()];
        self.do_copy(iova, buf.len(), Protection::read(), |offset, gpa, len| {
            mem.read_at_addr(&mut buf[offset..(offset + len)], gpa)
                .context("failed to read from gpa")
        })?;
        T::read_from(buf.as_bytes()).context("failed to construct obj")
    }

    /// Writes an object at a given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't writable.
    pub fn write_obj_at_addr<T: AsBytes>(
        &self,
        mem: &GuestMemory,
        val: T,
        iova: u64,
    ) -> anyhow::Result<()> {
        let buf = val.as_bytes();
        self.do_copy(iova, buf.len(), Protection::write(), |offset, gpa, len| {
            mem.write_at_addr(&buf[offset..(offset + len)], gpa)
                .context("failed to write to gpa")
        })?;
        Ok(())
    }

    /// Validates that [iova, iova+size) lies within this region, and that
    /// the region is valid according to mem.
    pub fn is_valid(&self, mem: &GuestMemory, iova: u64, size: u64) -> bool {
        let inner = self.inner.lock();
        let iova_end = iova.checked_add(size);
        if iova_end.is_none() {
            return false;
        }
        if iova < inner.iova || iova_end.unwrap() > (inner.iova + inner.size) {
            return false;
        }
        // Reuse the guard acquired above; locking `self.inner` a second time
        // in the same scope would deadlock on a non-reentrant mutex.
        inner
            .regions
            .iter()
            .all(|r| mem.range_overlap(r.gpa, r.gpa.unchecked_add(r.len as u64)))
    }

    /// Gets the list of guest physical regions for the exported region.
    pub fn get_mem_regions(&self) -> SmallVec<[MemRegion; 1]> {
        SmallVec::from_slice(&self.inner.lock().regions)
    }
}

#[cfg(test)]
mod tests {
    use std::thread;

    use base::Protection;
    use vm_memory::GuestAddress;

    use super::*;

    #[test]
    fn test() {
        let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
        let CreateIpcMapperRet {
            mut mapper,
            response_tx,
        } = create_ipc_mapper(3, request_tx);
        let user_handle = thread::spawn(move || {
            assert!(mapper
                .export(0x555, 1)
                .unwrap()
                .iter()
                .zip(&vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                },])
                .all(|(a, b)| a == b));
        });
        let iommu_handle = thread::spawn(move || {
            let (endpoint_id, iova, size) = match request_rx.recv().unwrap() {
                IommuRequest::Export {
                    endpoint_id,
                    iova,
                    size,
                } => (endpoint_id, iova, size),
                _ => unreachable!(),
            };
            assert_eq!(endpoint_id, 3);
            assert_eq!(iova, 0x555);
            assert_eq!(size, 1);
            response_tx
                .send(&IommuResponse::Export(vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                }]))
                .unwrap();
            // This join needs to be here because on Windows, if `response_tx`
            // is dropped before `response_rx` can read, the connection will
            // be severed and this test will fail.
            user_handle.join().unwrap();
        });
        iommu_handle.join().unwrap();
    }
}