// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

pub mod vfio_wrapper;

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::fs::File;
use std::rc::Rc;
use std::sync::Arc;

use base::error;
use base::MemoryMappingBuilder;
use base::TubeError;
use cros_async::AsyncTube;
use cros_async::Executor;
use hypervisor::MemSlot;
use sync::Mutex;
use vm_control::VirtioIOMMURequest;
use vm_control::VirtioIOMMUResponse;
use vm_control::VirtioIOMMUVfioCommand;
use vm_control::VirtioIOMMUVfioResult;

use self::vfio_wrapper::VfioWrapper;
use crate::virtio::iommu::ipc_memory_mapper::IommuRequest;
use crate::virtio::iommu::ipc_memory_mapper::IommuResponse;
use crate::virtio::iommu::DmabufRegionEntry;
use crate::virtio::iommu::Result;
use crate::virtio::iommu::State;
use crate::virtio::IommuError;
use crate::VfioContainer;

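// Shift used to convert a guest page frame number into a guest address
// (4 KiB pages).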
const VIRTIO_IOMMU_PAGE_SHIFT: u32 = 12;

impl State {
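    /// Registers a hot-plugged VFIO device as an IOMMU endpoint. The endpoint
    /// address must fall inside one of the configured hot-plug endpoint PCI
    /// ranges; otherwise `NotInPCIRanges` is returned.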
    pub(in crate::virtio::iommu) fn handle_add_vfio_device(
        &mut self,
        endpoint_addr: u32,
        wrapper: VfioWrapper,
    ) -> VirtioIOMMUVfioResult {
        let exists = |endpoint_addr: u32| -> bool {
            for endpoints_range in self.hp_endpoints_ranges.iter() {
                if endpoints_range.contains(&endpoint_addr) {
                    return true;
                }
            }
            false
        };

        if !exists(endpoint_addr) {
            return VirtioIOMMUVfioResult::NotInPCIRanges;
        }

        self.endpoints
            .insert(endpoint_addr, Arc::new(Mutex::new(Box::new(wrapper))));
        VirtioIOMMUVfioResult::Ok
    }

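    /// Removes a previously added VFIO endpoint and, if the endpoint was
    /// attached to a domain, drops that domain mapping as well. Returns
    /// `NoSuchDevice` if the endpoint is unknown.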
    pub(in crate::virtio::iommu) fn handle_del_vfio_device(
        &mut self,
        pci_address: u32,
    ) -> VirtioIOMMUVfioResult {
        if self.endpoints.remove(&pci_address).is_none() {
            error!("There is no vfio container of {}", pci_address);
            return VirtioIOMMUVfioResult::NoSuchDevice;
        }
        if let Some(domain) = self.endpoint_map.remove(&pci_address) {
            self.domain_map.remove(&domain);
        }
        VirtioIOMMUVfioResult::Ok
    }

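    /// Maps a dma-buf into the device: the buffer is mmapped into the current
    /// process and recorded under its guest address
    /// (`gfn << VIRTIO_IOMMU_PAGE_SHIFT`). Returns `InvalidParam` if the
    /// dma-buf cannot be mmapped.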
    pub(in crate::virtio::iommu) fn handle_map_dmabuf(
        &mut self,
        mem_slot: MemSlot,
        gfn: u64,
        size: u64,
        dma_buf: File,
    ) -> VirtioIOMMUVfioResult {
        let mmap = match MemoryMappingBuilder::new(size as usize)
            .from_file(&dma_buf)
            .build()
        {
            Ok(v) => v,
            Err(_) => {
                error!("failed to mmap dma_buf");
                return VirtioIOMMUVfioResult::InvalidParam;
            }
        };
        self.dmabuf_mem.insert(
            gfn << VIRTIO_IOMMU_PAGE_SHIFT,
            DmabufRegionEntry {
                mmap,
                mem_slot,
                len: size,
            },
        );

        VirtioIOMMUVfioResult::Ok
    }

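    /// Unmaps a previously mapped dma-buf, identified by its memory slot.
    /// Returns `NoSuchMappedDmabuf` if no mapping with that slot exists.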
    pub(in crate::virtio::iommu) fn handle_unmap_dmabuf(
        &mut self,
        mem_slot: MemSlot,
    ) -> VirtioIOMMUVfioResult {
        if let Some(range) = self
            .dmabuf_mem
            .iter()
            .find(|(_, dmabuf_entry)| dmabuf_entry.mem_slot == mem_slot)
            .map(|entry| *entry.0)
        {
            self.dmabuf_mem.remove(&range);
            VirtioIOMMUVfioResult::Ok
        } else {
            VirtioIOMMUVfioResult::NoSuchMappedDmabuf
        }
    }

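    /// Dispatches a `VirtioIOMMUVfioCommand` to the matching handler above and
    /// wraps the result in a `VirtioIOMMUResponse`.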
    pub(in crate::virtio::iommu) fn handle_vfio(
        &mut self,
        vfio_cmd: VirtioIOMMUVfioCommand,
    ) -> VirtioIOMMUResponse {
        use VirtioIOMMUVfioCommand::*;
        let vfio_result = match vfio_cmd {
            VfioDeviceAdd {
                wrapper_id,
                container,
                endpoint_addr,
            } => match VfioContainer::new_from_container(container) {
                Ok(vfio_container) => {
                    let wrapper =
                        VfioWrapper::new_with_id(vfio_container, wrapper_id, self.mem.clone());
                    self.handle_add_vfio_device(endpoint_addr, wrapper)
                }
                Err(e) => {
                    error!("failed to verify the new container: {}", e);
                    VirtioIOMMUVfioResult::NoAvailableContainer
                }
            },
            VfioDeviceDel { endpoint_addr } => self.handle_del_vfio_device(endpoint_addr),
            VfioDmabufMap {
                mem_slot,
                gfn,
                size,
                dma_buf,
            } => self.handle_map_dmabuf(mem_slot, gfn, size, File::from(dma_buf)),
            VfioDmabufUnmap(mem_slot) => self.handle_unmap_dmabuf(mem_slot),
        };
        VirtioIOMMUResponse::VfioResponse(vfio_result)
    }
}

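/// Serves `VirtioIOMMURequest`s arriving on `command_tube`, replying to each
/// request on the same tube, until receiving a request fails.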
pub(in crate::virtio::iommu) async fn handle_command_tube(
    state: &Rc<RefCell<State>>,
    command_tube: AsyncTube,
) -> Result<()> {
    loop {
        match command_tube.next::<VirtioIOMMURequest>().await {
            Ok(command) => {
                let response: VirtioIOMMUResponse = match command {
                    VirtioIOMMURequest::VfioCommand(vfio_cmd) => {
                        state.borrow_mut().handle_vfio(vfio_cmd)
                    }
                };
                if let Err(e) = command_tube.send(response).await {
                    error!("{}", IommuError::VirtioIOMMUResponseError(e));
                }
            }
            Err(e) => {
                return Err(IommuError::VirtioIOMMUReqError(e));
            }
        }
    }
}

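/// Services `IommuRequest`s (`Export`, `Release`, `StartExportSession`)
/// arriving on `request_tube`, routing each to the target endpoint's mapper
/// and sending the result back on the matching response tube. If
/// `request_tube` is `None`, this future pends forever and never completes.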
pub(in crate::virtio::iommu) async fn handle_translate_request(
    ex: &Executor,
    state: &Rc<RefCell<State>>,
    request_tube: Option<AsyncTube>,
    response_tubes: Option<BTreeMap<u32, AsyncTube>>,
) -> Result<()> {
    let request_tube = match request_tube {
        Some(r) => r,
        None => {
            futures::future::pending::<()>().await;
            return Ok(());
        }
    };
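    // The caller is expected to pass `response_tubes` whenever `request_tube`
    // is present; the unwrap below encodes that invariant.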
    let response_tubes = response_tubes.unwrap();
    loop {
        let req: IommuRequest = match request_tube.next().await {
            Ok(req) => req,
            Err(TubeError::Disconnected) => {
                // This means the process on the other side of the tube went away. That's
                // not a problem with virtio-iommu itself, so just exit this callback
                // and wait for crosvm to exit.
                return Ok(());
            }
            Err(e) => {
                return Err(IommuError::Tube(e));
            }
        };
        let resp = if let Some(mapper) = state.borrow().endpoints.get(&req.get_endpoint_id()) {
            match req {
                IommuRequest::Export { iova, size, .. } => {
                    mapper.lock().export(iova, size).map(IommuResponse::Export)
                }
                IommuRequest::Release { iova, size, .. } => mapper
                    .lock()
                    .release(iova, size)
                    .map(|_| IommuResponse::Release),
                IommuRequest::StartExportSession { .. } => mapper
                    .lock()
                    .start_export_session(ex)
                    .map(IommuResponse::StartExportSession),
            }
        } else {
            error!("endpoint {} not found", req.get_endpoint_id());
            continue;
        };
        let resp: IommuResponse = match resp {
            Ok(resp) => resp,
            Err(e) => IommuResponse::Err(format!("{:?}", e)),
        };
        response_tubes
            .get(&req.get_endpoint_id())
            .unwrap()
            .send(resp)
            .await
            .map_err(IommuError::Tube)?;
    }
}