// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Provides utilities for communicating with an iommu running in another process.
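//!
//! The flow, roughly: a device-side `IpcMemoryMapper` sends an `IommuRequest`
//! over one `Tube` and reads the matching `IommuResponse` from another. A
//! minimal sketch of the device side, assuming `request_tx` already reaches
//! the remote iommu (the endpoint id is illustrative):
//!
//! ```ignore
//! let CreateIpcMapperRet {
//!     mut mapper,
//!     response_tx,
//! } = create_ipc_mapper(/* endpoint_id= */ 1, request_tx);
//! // `response_tx` is handed to the iommu process, which replies to this
//! // mapper's requests through it.
//! let regions = mapper.export(iova, size)?;
//! ```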

use std::sync::Arc;

use anyhow::anyhow;
use anyhow::bail;
use anyhow::Context;
use anyhow::Result;
use base::error;
use base::AsRawDescriptor;
use base::AsRawDescriptors;
use base::Event;
use base::Protection;
use base::RawDescriptor;
use base::Tube;
use serde::Deserialize;
use serde::Serialize;
use smallvec::SmallVec;
use sync::Mutex;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::FromBytes;
use zerocopy::FromZeros;
use zerocopy::Immutable;
use zerocopy::IntoBytes;

use crate::virtio::memory_mapper::MemRegion;

#[derive(Serialize, Deserialize)]
pub(super) enum IommuRequest {
    Export {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    Release {
        endpoint_id: u32,
        iova: u64,
        size: u64,
    },
    StartExportSession {
        endpoint_id: u32,
    },
}

#[derive(Serialize, Deserialize)]
pub(super) enum IommuResponse {
    Export(Vec<MemRegion>),
    Release,
    StartExportSession(Event),
    Err(String),
}

impl IommuRequest {
    pub(super) fn get_endpoint_id(&self) -> u32 {
        match self {
            Self::Export { endpoint_id, .. } => *endpoint_id,
            Self::Release { endpoint_id, .. } => *endpoint_id,
            Self::StartExportSession { endpoint_id } => *endpoint_id,
        }
    }
}

/// Sends address translation requests to a remote iommu over a `Tube`, and
/// receives the translated addresses over another `Tube`.
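///
/// A sketch of one round trip, assuming both tubes are connected to the
/// remote iommu process:
///
/// ```ignore
/// let mut mapper = IpcMemoryMapper::new(request_tx, response_rx, endpoint_id);
/// let regions = mapper.export(iova, size)?;
/// // ... use the returned gpa regions ...
/// mapper.release(iova, size)?;
/// ```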
pub struct IpcMemoryMapper {
    request_tx: Tube,
    response_rx: Tube,
    endpoint_id: u32,
}

impl std::fmt::Debug for IpcMemoryMapper {
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        f.debug_struct("IpcMemoryMapper")
            .field("endpoint_id", &self.endpoint_id)
            .finish()
    }
}

fn map_bad_resp(resp: IommuResponse) -> anyhow::Error {
    match resp {
        IommuResponse::Err(e) => anyhow!("remote error {}", e),
        _ => anyhow!("response type mismatch"),
    }
}

impl IpcMemoryMapper {
    /// Returns a new `IpcMemoryMapper` instance.
    ///
    /// # Arguments
    ///
    /// * `request_tx` - A tube for sending `IommuRequest`s to the iommu process.
    /// * `response_rx` - A tube for receiving the corresponding `IommuResponse`s.
    /// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
    pub fn new(request_tx: Tube, response_rx: Tube, endpoint_id: u32) -> Self {
        Self {
            request_tx,
            response_rx,
            endpoint_id,
        }
    }

    fn do_request(&self, req: IommuRequest) -> Result<IommuResponse> {
        self.request_tx
            .send(&req)
            .context("failed to send request")?;
        self.response_rx
            .recv::<IommuResponse>()
            .context("failed to get response")
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::export].
    pub fn export(&mut self, iova: u64, size: u64) -> Result<Vec<MemRegion>> {
        let req = IommuRequest::Export {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Export(vec) => Ok(vec),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::release].
    pub fn release(&mut self, iova: u64, size: u64) -> Result<()> {
        let req = IommuRequest::Release {
            endpoint_id: self.endpoint_id,
            iova,
            size,
        };
        match self.do_request(req)? {
            IommuResponse::Release => Ok(()),
            e => Err(map_bad_resp(e)),
        }
    }

    /// See [crate::virtio::memory_mapper::MemoryMapper::start_export_session].
    pub fn start_export_session(&mut self) -> Result<Event> {
        let req = IommuRequest::StartExportSession {
            endpoint_id: self.endpoint_id,
        };
        match self.do_request(req)? {
            IommuResponse::StartExportSession(evt) => Ok(evt),
            e => Err(map_bad_resp(e)),
        }
    }
}

impl AsRawDescriptors for IpcMemoryMapper {
    fn as_raw_descriptors(&self) -> Vec<RawDescriptor> {
        vec![
            self.request_tx.as_raw_descriptor(),
            self.response_rx.as_raw_descriptor(),
        ]
    }
}

pub struct CreateIpcMapperRet {
    pub mapper: IpcMemoryMapper,
    pub response_tx: Tube,
}

/// Returns a new `IpcMemoryMapper` instance and a `response_tx` for the iommu
/// to send `IommuResponse`s back through.
///
/// # Arguments
///
/// * `endpoint_id` - For the remote iommu to identify the device/ipc mapper.
/// * `request_tx` - A tube for sending `IommuRequest`s to the remote iommu. This should be cloned
///   and shared between ipc mappers with different `endpoint_id`s.
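///
/// A sketch of the iommu side answering one request, assuming `request_rx` is
/// the receiving end of `request_tx`; `lookup_translation` is a hypothetical
/// helper named only for this sketch:
///
/// ```ignore
/// match request_rx.recv::<IommuRequest>()? {
///     IommuRequest::Export { endpoint_id, iova, size } => {
///         // Hypothetical helper; the real iommu consults its page tables.
///         let regions = lookup_translation(endpoint_id, iova, size)?;
///         response_tx.send(&IommuResponse::Export(regions))?;
///     }
///     _ => unimplemented!(),
/// }
/// ```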
pub fn create_ipc_mapper(endpoint_id: u32, request_tx: Tube) -> CreateIpcMapperRet {
    let (response_tx, response_rx) = Tube::pair().expect("failed to create tube pair");
    CreateIpcMapperRet {
        mapper: IpcMemoryMapper::new(request_tx, response_rx, endpoint_id),
        response_tx,
    }
}

#[derive(Debug)]
struct ExportedRegionInner {
    regions: Vec<MemRegion>,
    iova: u64,
    size: u64,
    iommu: Arc<Mutex<IpcMemoryMapper>>,
}

impl Drop for ExportedRegionInner {
    fn drop(&mut self) {
        if let Err(e) = self.iommu.lock().release(self.iova, self.size) {
            error!("Error releasing region {:?}", e);
        }
    }
}

/// A region exported from the virtio-iommu.
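///
/// A sketch of reading and writing through an exported region, assuming
/// `iommu` wraps a connected `IpcMemoryMapper` and `iova` points at a mapped
/// `u32` (names and addresses are illustrative):
///
/// ```ignore
/// let region = ExportedRegion::new(&mem, iommu, iova, 4)?;
/// let val: u32 = region.read_obj_from_addr(&mem, iova)?;
/// region.write_obj_at_addr(&mem, val + 1, iova)?;
/// // Dropping the last clone of `region` releases the export.
/// ```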
#[derive(Clone, Debug)]
pub struct ExportedRegion {
    inner: Arc<Mutex<ExportedRegionInner>>,
}

impl ExportedRegion {
    /// Creates a new, fully initialized exported region.
    pub fn new(
        mem: &GuestMemory,
        iommu: Arc<Mutex<IpcMemoryMapper>>,
        iova: u64,
        size: u64,
    ) -> Result<Self> {
        let regions = iommu
            .lock()
            .export(iova, size)
            .context("failed to export")?;
        for r in &regions {
            if !mem.is_valid_range(r.gpa, r.len) {
                bail!("region not in memory range");
            }
        }
        Ok(Self {
            inner: Arc::new(Mutex::new(ExportedRegionInner {
                regions,
                iova,
                size,
                iommu,
            })),
        })
    }

    // Helper function for copying to/from [iova, iova+remaining).
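    //
    // The exported region is a list of contiguous gpa ranges. Walk that list,
    // skipping ranges that lie entirely before `iova`, then copy chunk by
    // chunk until `remaining` bytes have been transferred.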
    fn do_copy<C>(
        &self,
        iova: u64,
        mut remaining: usize,
        prot: Protection,
        mut copy_fn: C,
    ) -> Result<()>
    where
        C: FnMut(usize /* offset */, GuestAddress, usize /* len */) -> Result<usize>,
    {
        let inner = self.inner.lock();
        let mut region_offset = iova.checked_sub(inner.iova).with_context(|| {
            format!(
                "out of bounds: src_iova={} region_iova={}",
                iova, inner.iova
            )
        })?;
        let mut offset = 0;
        for r in &inner.regions {
            if region_offset >= r.len {
                region_offset -= r.len;
                continue;
            }

            if !r.prot.allows(&prot) {
                bail!("gpa is not accessible");
            }

            // Clamp to what is left of this region past `region_offset`, not
            // to the whole region length, so a copy starting mid-region
            // cannot run past the region's end.
            let len = ((r.len - region_offset) as usize).min(remaining);
            let copy_len = copy_fn(offset, r.gpa.unchecked_add(region_offset), len)?;
            if len != copy_len {
                bail!("incomplete copy: expected={}, actual={}", len, copy_len);
            }

            remaining -= len;
            offset += len;
            region_offset = 0;

            if remaining == 0 {
                return Ok(());
            }
        }

        Err(anyhow!("not enough data: remaining={}", remaining))
    }

    /// Reads an object from the given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't readable.
    pub fn read_obj_from_addr<T: IntoBytes + FromBytes + FromZeros>(
        &self,
        mem: &GuestMemory,
        iova: u64,
    ) -> anyhow::Result<T> {
        let mut val = T::new_zeroed();
        let buf = val.as_mut_bytes();
        self.do_copy(iova, buf.len(), Protection::read(), |offset, gpa, len| {
            mem.read_at_addr(&mut buf[offset..(offset + len)], gpa)
                .context("failed to read from gpa")
        })?;
        Ok(val)
    }

    /// Writes an object at a given iova. Fails if the specified iova range does
    /// not lie within this region, or if part of the region isn't writable.
    pub fn write_obj_at_addr<T: Immutable + IntoBytes>(
        &self,
        mem: &GuestMemory,
        val: T,
        iova: u64,
    ) -> anyhow::Result<()> {
        let buf = val.as_bytes();
        self.do_copy(iova, buf.len(), Protection::write(), |offset, gpa, len| {
            mem.write_at_addr(&buf[offset..(offset + len)], gpa)
                .context("failed to write to gpa")
        })?;
        Ok(())
    }

    /// Validates that [iova, iova+size) lies within this region, and that
    /// the region is valid according to mem.
    pub fn is_valid(&self, mem: &GuestMemory, iova: u64, size: u64) -> bool {
        let inner = self.inner.lock();
        let iova_end = match iova.checked_add(size) {
            Some(end) => end,
            None => return false,
        };
        if iova < inner.iova || iova_end > (inner.iova + inner.size) {
            return false;
        }
        // Use the already-held guard; locking `self.inner` a second time
        // here would deadlock.
        inner
            .regions
            .iter()
            .all(|r| mem.range_overlap(r.gpa, r.gpa.unchecked_add(r.len)))
    }

    /// Gets the list of guest physical regions for the exported region.
    pub fn get_mem_regions(&self) -> SmallVec<[MemRegion; 1]> {
        SmallVec::from_slice(&self.inner.lock().regions)
    }
}

#[cfg(test)]
mod tests {
    use std::thread;

    use base::Protection;
    use vm_memory::GuestAddress;

    use super::*;

    #[test]
    fn test() {
        let (request_tx, request_rx) = Tube::pair().expect("failed to create tube pair");
        let CreateIpcMapperRet {
            mut mapper,
            response_tx,
        } = create_ipc_mapper(3, request_tx);
        let user_handle = thread::spawn(move || {
            assert!(mapper
                .export(0x555, 1)
                .unwrap()
                .iter()
                .zip(&vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                },])
                .all(|(a, b)| a == b));
        });
        let iommu_handle = thread::spawn(move || {
            let (endpoint_id, iova, size) = match request_rx.recv().unwrap() {
                IommuRequest::Export {
                    endpoint_id,
                    iova,
                    size,
                } => (endpoint_id, iova, size),
                _ => unreachable!(),
            };
            assert_eq!(endpoint_id, 3);
            assert_eq!(iova, 0x555);
            assert_eq!(size, 1);
            response_tx
                .send(&IommuResponse::Export(vec![MemRegion {
                    gpa: GuestAddress(0x777),
                    len: 1,
                    prot: Protection::read_write(),
                }]))
                .unwrap();
            // This join needs to be here because on Windows, if `response_tx`
            // is dropped before `response_rx` can read, the connection will
            // be severed and this test will fail.
            user_handle.join().unwrap();
        });
        iommu_handle.join().unwrap();
    }
}