1 // Copyright 2017 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! This module implements the virtio wayland device used by the guest to access the host's wayland server.
6 //!
7 //! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
8 //! sending commands to the guest that are generated by the host, usually messages from the wayland
9 //! server. The `out` queue is for commands from the guest, usually requests to allocate shared
10 //! memory, open a wayland server connection, or send data over an existing connection.
11 //!
12 //! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
13 //! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
14 //! or a unix domain socket to the wayland server. In the shared memory case, there is also an
15 //! associated slot that indicates which hypervisor memory slot the memory is installed into, as
16 //! well as a page frame number that the guest can access the memory from.
17 //!
18 //! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
19 //! wire." They are decoded and executed in the `execute` function and encoded as some variant of
20 //! `WlResp` for responses.
21 //!
22 //! There is one `WlState` instance that contains every known vfd and the current state of the
23 //! `in` queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
24 //! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
25 //! requests, such as those registering guest memory.
26 //!
27 //! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
28 //! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
29 //! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
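//!
//! For example, a guest request for a new shared memory vfd travels the `out` queue as a
//! `CtrlVfdNew` with type `VIRTIO_WL_CMD_VFD_NEW`; the device allocates and maps the memory and
//! answers in place with a `CtrlVfdNew` whose type is `VIRTIO_WL_RESP_VFD_NEW` and whose `pfn` and
//! `size` describe the new mapping.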
30
31 use std::cell::RefCell;
32 use std::collections::btree_map::Entry;
33 use std::collections::BTreeMap as Map;
34 use std::collections::BTreeSet as Set;
35 use std::collections::VecDeque;
36 use std::convert::From;
37 use std::error::Error as StdError;
38 use std::fmt;
39 use std::fs::File;
40 use std::io;
41 use std::io::IoSliceMut;
42 use std::io::Read;
43 use std::io::Seek;
44 use std::io::SeekFrom;
45 use std::io::Write;
46 use std::mem::size_of;
47 #[cfg(feature = "minigbm")]
48 use std::os::raw::c_uint;
49 #[cfg(feature = "minigbm")]
50 use std::os::raw::c_ulonglong;
51 use std::os::unix::net::UnixStream;
52 use std::path::Path;
53 use std::path::PathBuf;
54 use std::rc::Rc;
55 use std::result;
56 use std::time::Duration;
57
58 use anyhow::anyhow;
59 use anyhow::Context;
60 use base::error;
61 #[cfg(feature = "minigbm")]
62 use base::ioctl_iow_nr;
63 use base::ioctl_iowr_nr;
64 use base::ioctl_with_ref;
65 use base::pagesize;
66 use base::pipe;
67 use base::round_up_to_page_size;
68 use base::warn;
69 use base::AsRawDescriptor;
70 use base::Error;
71 use base::Event;
72 use base::EventToken;
73 use base::EventType;
74 use base::FileFlags;
75 use base::FromRawDescriptor;
76 #[cfg(feature = "gpu")]
77 use base::IntoRawDescriptor;
78 use base::Protection;
79 use base::RawDescriptor;
80 use base::Result;
81 use base::SafeDescriptor;
82 use base::ScmSocket;
83 use base::SharedMemory;
84 use base::SharedMemoryUnix;
85 use base::Tube;
86 use base::TubeError;
87 use base::WaitContext;
88 use base::WorkerThread;
89 use data_model::*;
90 #[cfg(feature = "minigbm")]
91 use libc::EBADF;
92 #[cfg(feature = "minigbm")]
93 use libc::EINVAL;
94 use remain::sorted;
95 use resources::address_allocator::AddressAllocator;
96 use resources::AddressRange;
97 use resources::Alloc;
98 #[cfg(feature = "minigbm")]
99 use rutabaga_gfx::DrmFormat;
100 #[cfg(feature = "minigbm")]
101 use rutabaga_gfx::ImageAllocationInfo;
102 #[cfg(feature = "minigbm")]
103 use rutabaga_gfx::ImageMemoryRequirements;
104 #[cfg(feature = "minigbm")]
105 use rutabaga_gfx::RutabagaDescriptor;
106 #[cfg(feature = "minigbm")]
107 use rutabaga_gfx::RutabagaError;
108 #[cfg(feature = "minigbm")]
109 use rutabaga_gfx::RutabagaGralloc;
110 #[cfg(feature = "minigbm")]
111 use rutabaga_gfx::RutabagaGrallocFlags;
112 #[cfg(feature = "minigbm")]
113 use rutabaga_gfx::RutabagaIntoRawDescriptor;
114 use thiserror::Error as ThisError;
115 use vm_control::VmMemorySource;
116 use vm_memory::GuestAddress;
117 use vm_memory::GuestMemory;
118 use vm_memory::GuestMemoryError;
119 use zerocopy::AsBytes;
120 use zerocopy::FromBytes;
121
122 #[cfg(feature = "gpu")]
123 use super::resource_bridge::get_resource_info;
124 #[cfg(feature = "gpu")]
125 use super::resource_bridge::BufferInfo;
126 #[cfg(feature = "gpu")]
127 use super::resource_bridge::ResourceBridgeError;
128 #[cfg(feature = "gpu")]
129 use super::resource_bridge::ResourceInfo;
130 #[cfg(feature = "gpu")]
131 use super::resource_bridge::ResourceRequest;
132 use super::DeviceType;
133 use super::Interrupt;
134 use super::Queue;
135 use super::Reader;
136 use super::SharedMemoryMapper;
137 use super::SharedMemoryRegion;
138 use super::SignalableInterrupt;
139 use super::VirtioDevice;
140 use super::Writer;
141 use crate::virtio::device_constants::wl::QUEUE_SIZES;
142 use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
143 use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
144 use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
145 use crate::virtio::virtio_device::Error as VirtioError;
146 use crate::virtio::VirtioDeviceSaved;
147 use crate::Suspendable;
148
149 const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
150 const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
151 const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
152 const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
153 const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
154 const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
155 const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
156 const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
157 #[cfg(feature = "minigbm")]
158 const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
159 #[cfg(feature = "minigbm")]
160 const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
161 #[cfg(feature = "gpu")]
162 const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
163 const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
164 const VIRTIO_WL_RESP_OK: u32 = 4096;
165 const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
166 #[cfg(feature = "minigbm")]
167 const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
168 const VIRTIO_WL_RESP_ERR: u32 = 4352;
169 const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
170 const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
171 const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
172 const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
173 const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
174 const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
175 const VIRTIO_WL_VFD_READ: u32 = 0x2;
176 const VIRTIO_WL_VFD_MAP: u32 = 0x2;
177 const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
178 const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
179
180 const NEXT_VFD_ID_BASE: u32 = 0x40000000;
181 const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
182 // Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
183 // number of allocs.
184 const IN_BUFFER_LEN: usize =
185 0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
186
187 #[cfg(feature = "minigbm")]
188 const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
189
190 #[cfg(feature = "minigbm")]
191 const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
192
193 #[cfg(feature = "minigbm")]
194 #[repr(C)]
195 #[derive(Copy, Clone)]
196 struct dma_buf_sync {
197 flags: c_ulonglong,
198 }
199
200 #[cfg(feature = "minigbm")]
201 ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
202
203 #[repr(C)]
204 #[derive(Copy, Clone, Default)]
205 struct sync_file_info {
206 name: [u8; 32],
207 status: i32,
208 flags: u32,
209 num_fences: u32,
210 pad: u32,
211 sync_fence_info: u64,
212 }
213
214 ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
215
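// Returns true if `f` is a sync_file fence, determined by whether the SYNC_IOC_FILE_INFO ioctl
// succeeds on it.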
216 fn is_fence(f: &File) -> bool {
217 let info = sync_file_info::default();
218 // Safe as f is a valid file
219 unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO(), &info) == 0 }
220 }
221
222 #[cfg(feature = "minigbm")]
223 #[derive(Debug, Default)]
224 struct GpuMemoryPlaneDesc {
225 stride: u32,
226 offset: u32,
227 }
228
229 #[cfg(feature = "minigbm")]
230 #[derive(Debug, Default)]
231 struct GpuMemoryDesc {
232 planes: [GpuMemoryPlaneDesc; 3],
233 }
234
235 const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
236 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
237 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
238 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
239
240 const VIRTIO_WL_PFN_SHIFT: u32 = 12;
241
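// Encodes a vfd_new message into `writer`, as either the VIRTIO_WL_CMD_VFD_NEW command or the
// VIRTIO_WL_RESP_VFD_NEW response depending on `resp`.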
242 fn encode_vfd_new(
243 writer: &mut Writer,
244 resp: bool,
245 vfd_id: u32,
246 flags: u32,
247 pfn: u64,
248 size: u32,
249 ) -> WlResult<()> {
250 let ctrl_vfd_new = CtrlVfdNew {
251 hdr: CtrlHeader {
252 type_: Le32::from(if resp {
253 VIRTIO_WL_RESP_VFD_NEW
254 } else {
255 VIRTIO_WL_CMD_VFD_NEW
256 }),
257 flags: Le32::from(0),
258 },
259 id: Le32::from(vfd_id),
260 flags: Le32::from(flags),
261 pfn: Le64::from(pfn),
262 size: Le32::from(size),
263 padding: Default::default(),
264 };
265
266 writer
267 .write_obj(ctrl_vfd_new)
268 .map_err(WlError::WriteResponse)
269 }
270
271 #[cfg(feature = "minigbm")]
272 fn encode_vfd_new_dmabuf(
273 writer: &mut Writer,
274 vfd_id: u32,
275 flags: u32,
276 pfn: u64,
277 size: u32,
278 desc: GpuMemoryDesc,
279 ) -> WlResult<()> {
280 let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
281 hdr: CtrlHeader {
282 type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
283 flags: Le32::from(0),
284 },
285 id: Le32::from(vfd_id),
286 flags: Le32::from(flags),
287 pfn: Le64::from(pfn),
288 size: Le32::from(size),
289 width: Le32::from(0),
290 height: Le32::from(0),
291 format: Le32::from(0),
292 stride0: Le32::from(desc.planes[0].stride),
293 stride1: Le32::from(desc.planes[1].stride),
294 stride2: Le32::from(desc.planes[2].stride),
295 offset0: Le32::from(desc.planes[0].offset),
296 offset1: Le32::from(desc.planes[1].offset),
297 offset2: Le32::from(desc.planes[2].offset),
298 };
299
300 writer
301 .write_obj(ctrl_vfd_new_dmabuf)
302 .map_err(WlError::WriteResponse)
303 }
304
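// Encodes a VIRTIO_WL_CMD_VFD_RECV message into `writer`: the header, the ids of any vfds
// delivered alongside the data, then the data itself.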
305 fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
306 let ctrl_vfd_recv = CtrlVfdRecv {
307 hdr: CtrlHeader {
308 type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
309 flags: Le32::from(0),
310 },
311 id: Le32::from(vfd_id),
312 vfd_count: Le32::from(vfd_ids.len() as u32),
313 };
314 writer
315 .write_obj(ctrl_vfd_recv)
316 .map_err(WlError::WriteResponse)?;
317
318 for &recv_vfd_id in vfd_ids.iter() {
319 writer
320 .write_obj(Le32::from(recv_vfd_id))
321 .map_err(WlError::WriteResponse)?;
322 }
323
324 writer.write_all(data).map_err(WlError::WriteResponse)
325 }
326
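// Encodes a VIRTIO_WL_CMD_VFD_HUP message telling the guest that `vfd_id` has hung up.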
327 fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
328 let ctrl_vfd_new = CtrlVfd {
329 hdr: CtrlHeader {
330 type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
331 flags: Le32::from(0),
332 },
333 id: Le32::from(vfd_id),
334 };
335
336 writer
337 .write_obj(ctrl_vfd_new)
338 .map_err(WlError::WriteResponse)
339 }
340
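// Encodes `resp` into `writer`, dispatching to the message-specific encoders above; plain status
// responses are written as their bare 32-bit code.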
341 fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
342 match resp {
343 WlResp::VfdNew {
344 id,
345 flags,
346 pfn,
347 size,
348 resp,
349 } => encode_vfd_new(writer, resp, id, flags, pfn, size),
350 #[cfg(feature = "minigbm")]
351 WlResp::VfdNewDmabuf {
352 id,
353 flags,
354 pfn,
355 size,
356 desc,
357 } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
358 WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
359 WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
360 r => writer
361 .write_obj(Le32::from(r.get_code()))
362 .map_err(WlError::WriteResponse),
363 }
364 }
365
366 #[allow(dead_code)]
367 #[sorted]
368 #[derive(ThisError, Debug)]
369 enum WlError {
370 #[error("overflow in calculation")]
371 CheckedOffset,
372 #[error("failed to synchronize DMABuf access: {0}")]
373 DmabufSync(io::Error),
374 #[error("failed to create shared memory from descriptor: {0}")]
375 FromSharedMemory(Error),
376 #[error("gralloc error: {0}")]
377 #[cfg(feature = "minigbm")]
378 GrallocError(#[from] RutabagaError),
379 #[error("access violation in guest memory: {0}")]
380 GuestMemory(#[from] GuestMemoryError),
381 #[error("invalid string: {0}")]
382 InvalidString(std::str::Utf8Error),
383 #[error("failed to create shared memory allocation: {0}")]
384 NewAlloc(Error),
385 #[error("failed to create pipe: {0}")]
386 NewPipe(Error),
387 #[error("error parsing descriptor: {0}")]
388 ParseDesc(io::Error),
389 #[error("failed to read a pipe: {0}")]
390 ReadPipe(io::Error),
391 #[error("failed to recv on a socket: {0}")]
392 RecvVfd(Error),
393 #[error("failed to send on a socket: {0}")]
394 SendVfd(Error),
395 #[error("shmem mapper failure: {0}")]
396 ShmemMapperError(anyhow::Error),
397 #[error("failed to connect socket: {0}")]
398 SocketConnect(io::Error),
399 #[error("failed to set socket as non-blocking: {0}")]
400 SocketNonBlock(io::Error),
401 #[error("unknown socket name: {0}")]
402 UnknownSocketName(String),
403 #[error("invalid response from parent VM")]
404 VmBadResponse,
405 #[error("failed to control parent VM: {0}")]
406 VmControl(TubeError),
407 #[error("access violation in guest volatile memory: {0}")]
408 VolatileMemory(#[from] VolatileMemoryError),
409 #[error("failed to listen to descriptor on wait context: {0}")]
410 WaitContextAdd(Error),
411 #[error("failed to write to a pipe: {0}")]
412 WritePipe(io::Error),
413 #[error("failed to write response: {0}")]
414 WriteResponse(io::Error),
415 }
416
417 type WlResult<T> = result::Result<T, WlError>;
418
419 pub const WL_SHMEM_ID: u8 = 0;
420 pub const WL_SHMEM_SIZE: u64 = 1 << 32;
421
422 struct VmRequesterState {
423 mapper: Box<dyn SharedMemoryMapper>,
424 #[cfg(feature = "minigbm")]
425 gralloc: RutabagaGralloc,
426
427 // Allocator for shm address space
428 address_allocator: AddressAllocator,
429
430 // Map of existing mappings in the shm address space
431 allocs: Map<u64 /* offset */, Alloc>,
432
433 // The id for the next shmem allocation
434 next_alloc: usize,
435 }
436
437 #[derive(Clone)]
438 struct VmRequester {
439 state: Rc<RefCell<VmRequesterState>>,
440 }
441
442 // The following are wrappers to avoid base dependencies in the rutabaga crate
443 #[cfg(feature = "minigbm")]
444 fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
445 // Safe because we own the SafeDescriptor at this point.
446 unsafe { SafeDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
447 }
448
449 impl VmRequester {
450 fn new(
451 mapper: Box<dyn SharedMemoryMapper>,
452 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
453 ) -> VmRequester {
454 VmRequester {
455 state: Rc::new(RefCell::new(VmRequesterState {
456 mapper,
457 #[cfg(feature = "minigbm")]
458 gralloc,
459 address_allocator: AddressAllocator::new(
460 AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
461 Some(pagesize() as u64),
462 None,
463 )
464 .expect("failed to create allocator"),
465 allocs: Map::new(),
466 next_alloc: 0,
467 })),
468 }
469 }
470
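// Removes the mapping at `offset` from the device's shared memory region and releases the
// corresponding address-space allocation.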
471 fn unregister_memory(&self, offset: u64) -> WlResult<()> {
472 let mut state = self.state.borrow_mut();
473 state
474 .mapper
475 .remove_mapping(offset)
476 .map_err(WlError::ShmemMapperError)?;
477 let alloc = state
478 .allocs
479 .remove(&offset)
480 .context("unknown offset")
481 .map_err(WlError::ShmemMapperError)?;
482 state
483 .address_allocator
484 .release(alloc)
485 .expect("corrupt address space");
486 Ok(())
487 }
488
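// Allocates a linear gralloc buffer for the given dimensions and format, maps it into the
// device's shared memory region, and returns the offset, buffer descriptor, and memory
// requirements.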
489 #[cfg(feature = "minigbm")]
490 fn allocate_and_register_gpu_memory(
491 &self,
492 width: u32,
493 height: u32,
494 format: u32,
495 ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
496 let mut state = self.state.borrow_mut();
497
498 let img = ImageAllocationInfo {
499 width,
500 height,
501 drm_format: DrmFormat::from(format),
502 // Linear layout is a requirement as virtio wayland guest expects
503 // this for CPU access to the buffer. Scanout and texturing are
504 // optional as the consumer (wayland compositor) is expected to
505 // fall back to less efficient mechanisms for presentation if
506 // necessary. In practice, linear buffers for commonly used formats
507 // will also support scanout and texturing.
508 flags: RutabagaGrallocFlags::empty().use_linear(true),
509 };
510
511 let reqs = state
512 .gralloc
513 .get_image_memory_requirements(img)
514 .map_err(WlError::GrallocError)?;
515 let handle = state
516 .gralloc
517 .allocate_memory(reqs)
518 .map_err(WlError::GrallocError)?;
519 drop(state);
520
521 let safe_descriptor = to_safe_descriptor(handle.os_handle);
522 self.register_memory(
523 safe_descriptor
524 .try_clone()
525 .context("failed to dup gfx handle")
526 .map_err(WlError::ShmemMapperError)?,
527 reqs.size,
528 )
529 .map(|info| (info, safe_descriptor, reqs))
530 }
531
532 fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
533 self.register_memory(
534 SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
535 .context("failed to create safe descriptor")
536 .map_err(WlError::ShmemMapperError)?,
537 shm.size(),
538 )
539 }
540
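// Maps `descriptor` into the device's shared memory region at a newly allocated page-aligned
// offset and returns that offset.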
541 fn register_memory(&self, descriptor: SafeDescriptor, size: u64) -> WlResult<u64> {
542 let mut state = self.state.borrow_mut();
543 let size = round_up_to_page_size(size as usize) as u64;
544 let source = VmMemorySource::Descriptor {
545 descriptor,
546 offset: 0,
547 size,
548 };
549 let alloc = Alloc::Anon(state.next_alloc);
550 state.next_alloc += 1;
551 let offset = state
552 .address_allocator
553 .allocate(size, alloc, "virtio-wl".to_owned())
554 .context("failed to allocate offset")
555 .map_err(WlError::ShmemMapperError)?;
556
557 match state
558 .mapper
559 .add_mapping(source, offset, Protection::read_write())
560 {
561 Ok(()) => {
562 state.allocs.insert(offset, alloc);
563 Ok(offset)
564 }
565 Err(e) => {
566 // We just allocated it ourselves, it must exist.
567 state
568 .address_allocator
569 .release(alloc)
570 .expect("corrupt address space");
571 Err(WlError::ShmemMapperError(e))
572 }
573 }
574 }
575 }
576
577 #[repr(C)]
578 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
579 struct CtrlHeader {
580 type_: Le32,
581 flags: Le32,
582 }
583
584 #[repr(C)]
585 #[derive(Copy, Clone, Default, FromBytes, AsBytes)]
586 struct CtrlVfdNew {
587 hdr: CtrlHeader,
588 id: Le32,
589 flags: Le32,
590 pfn: Le64,
591 size: Le32,
592 padding: Le32,
593 }
594
595 #[repr(C)]
596 #[derive(Copy, Clone, Default, FromBytes)]
597 struct CtrlVfdNewCtxNamed {
598 hdr: CtrlHeader,
599 id: Le32,
600 flags: Le32, // Ignored.
601 pfn: Le64, // Ignored.
602 size: Le32, // Ignored.
603 name: [u8; 32],
604 }
605
606 #[repr(C)]
607 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
608 #[cfg(feature = "minigbm")]
609 struct CtrlVfdNewDmabuf {
610 hdr: CtrlHeader,
611 id: Le32,
612 flags: Le32,
613 pfn: Le64,
614 size: Le32,
615 width: Le32,
616 height: Le32,
617 format: Le32,
618 stride0: Le32,
619 stride1: Le32,
620 stride2: Le32,
621 offset0: Le32,
622 offset1: Le32,
623 offset2: Le32,
624 }
625
626 #[cfg(feature = "minigbm")]
627 #[repr(C)]
628 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
630 struct CtrlVfdDmabufSync {
631 hdr: CtrlHeader,
632 id: Le32,
633 flags: Le32,
634 }
635
636 #[repr(C)]
637 #[derive(Copy, Clone, AsBytes, FromBytes)]
638 struct CtrlVfdRecv {
639 hdr: CtrlHeader,
640 id: Le32,
641 vfd_count: Le32,
642 }
643
644 #[repr(C)]
645 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
646 struct CtrlVfd {
647 hdr: CtrlHeader,
648 id: Le32,
649 }
650
651 #[repr(C)]
652 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
653 struct CtrlVfdSend {
654 hdr: CtrlHeader,
655 id: Le32,
656 vfd_count: Le32,
657 // Remainder is an array of vfd_count IDs followed by data.
658 }
659
660 #[repr(C)]
661 #[derive(Copy, Clone, Default, AsBytes, FromBytes)]
662 struct CtrlVfdSendVfd {
663 kind: Le32,
664 id: Le32,
665 }
666
667 #[repr(C)]
668 #[derive(Copy, Clone, FromBytes)]
669 union CtrlVfdSendVfdV2Payload {
670 id: Le32,
671 seqno: Le64,
672 }
673
674 #[repr(C)]
675 #[derive(Copy, Clone, FromBytes)]
676 struct CtrlVfdSendVfdV2 {
677 kind: Le32,
678 payload: CtrlVfdSendVfdV2Payload,
679 }
680
681 impl CtrlVfdSendVfdV2 {
682 fn id(&self) -> Le32 {
683 assert!(
684 self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
685 || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
686 );
687 unsafe { self.payload.id }
688 }
689 #[cfg(feature = "gpu")]
690 fn seqno(&self) -> Le64 {
691 assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
692 unsafe { self.payload.seqno }
693 }
694 }
695
696 #[derive(Debug)]
697 #[allow(dead_code)]
698 enum WlResp<'a> {
699 Ok,
700 VfdNew {
701 id: u32,
702 flags: u32,
703 pfn: u64,
704 size: u32,
705 // The VfdNew variant can be either a response or a command depending on this `resp`. This
706 // is important for the `get_code` method.
707 resp: bool,
708 },
709 #[cfg(feature = "minigbm")]
710 VfdNewDmabuf {
711 id: u32,
712 flags: u32,
713 pfn: u64,
714 size: u32,
715 desc: GpuMemoryDesc,
716 },
717 VfdRecv {
718 id: u32,
719 data: &'a [u8],
720 vfds: &'a [u32],
721 },
722 VfdHup {
723 id: u32,
724 },
725 Err(Box<dyn StdError>),
726 OutOfMemory,
727 InvalidId,
728 InvalidType,
729 InvalidFlags,
730 InvalidCommand,
731 }
732
733 impl<'a> WlResp<'a> {
734 fn get_code(&self) -> u32 {
735 match *self {
736 WlResp::Ok => VIRTIO_WL_RESP_OK,
737 WlResp::VfdNew { resp, .. } => {
738 if resp {
739 VIRTIO_WL_RESP_VFD_NEW
740 } else {
741 VIRTIO_WL_CMD_VFD_NEW
742 }
743 }
744 #[cfg(feature = "minigbm")]
745 WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
746 WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
747 WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
748 WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
749 WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
750 WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
751 WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
752 WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
753 WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
754 }
755 }
756 }
757
758 #[derive(Default)]
759 struct WlVfd {
760 socket: Option<UnixStream>,
761 guest_shared_memory: Option<SharedMemory>,
762 remote_pipe: Option<File>,
763 local_pipe: Option<(u32 /* flags */, File)>,
764 slot: Option<(u64 /* offset */, VmRequester)>,
765 #[cfg(feature = "minigbm")]
766 is_dmabuf: bool,
767 fence: Option<File>,
768 is_fence: bool,
769 }
770
771 impl fmt::Debug for WlVfd {
772 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
773 write!(f, "WlVfd {{")?;
774 if let Some(s) = &self.socket {
775 write!(f, " socket: {}", s.as_raw_descriptor())?;
776 }
777 if let Some((offset, _)) = &self.slot {
778 write!(f, " offset: {}", offset)?;
779 }
780 if let Some(s) = &self.remote_pipe {
781 write!(f, " remote: {}", s.as_raw_descriptor())?;
782 }
783 if let Some((_, s)) = &self.local_pipe {
784 write!(f, " local: {}", s.as_raw_descriptor())?;
785 }
786 write!(f, " }}")
787 }
788 }
789
790 impl WlVfd {
791 fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
792 let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
793 let mut vfd = WlVfd::default();
794 vfd.socket = Some(socket);
795 Ok(vfd)
796 }
797
798 fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
799 let size_page_aligned = round_up_to_page_size(size as usize) as u64;
800 let vfd_shm =
801 SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
802
803 let offset = vm.register_shmem(&vfd_shm)?;
804
805 let mut vfd = WlVfd::default();
806 vfd.guest_shared_memory = Some(vfd_shm);
807 vfd.slot = Some((offset, vm));
808 Ok(vfd)
809 }
810
811 #[cfg(feature = "minigbm")]
812 fn dmabuf(
813 vm: VmRequester,
814 width: u32,
815 height: u32,
816 format: u32,
817 ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
818 let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
819 let mut vfd = WlVfd::default();
820 let vfd_shm =
821 SharedMemory::from_safe_descriptor(desc, Some(reqs.size)).map_err(WlError::NewAlloc)?;
822
823 let mut desc = GpuMemoryDesc::default();
824 for i in 0..3 {
825 desc.planes[i] = GpuMemoryPlaneDesc {
826 stride: reqs.strides[i],
827 offset: reqs.offsets[i],
828 }
829 }
830
831 vfd.guest_shared_memory = Some(vfd_shm);
832 vfd.slot = Some((offset, vm));
833 vfd.is_dmabuf = true;
834 Ok((vfd, desc))
835 }
836
837 #[cfg(feature = "minigbm")]
838 fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
839 if !self.is_dmabuf {
840 return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
841 }
842
843 match &self.guest_shared_memory {
844 Some(descriptor) => {
845 let sync = dma_buf_sync {
846 flags: flags as u64,
847 };
848 // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
849 if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
850 Err(WlError::DmabufSync(io::Error::last_os_error()))
851 } else {
852 Ok(())
853 }
854 }
855 None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
856 }
857 }
858
859 fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
860 let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
861 let mut vfd = WlVfd::default();
862 vfd.remote_pipe = Some(read_pipe);
863 vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
864 Ok(vfd)
865 }
866
867 fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
868 let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
869 let mut vfd = WlVfd::default();
870 vfd.remote_pipe = Some(write_pipe);
871 vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
872 Ok(vfd)
873 }
874
875 fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
876 // We need to determine if the given file is more like shared memory or a pipe/socket. A
877 // quick and easy check is to seek to the end of the file. If it works we assume it's not a
878 // pipe/socket because those have no end. We can even use that seek location as an indicator
879 // of how big the shared memory chunk to map into guest memory is. If seeking to the end
880 // fails, we assume it's a socket or pipe with read/write semantics.
881 if descriptor.seek(SeekFrom::End(0)).is_ok() {
882 let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
883 let offset = vm.register_shmem(&shm)?;
884
885 let mut vfd = WlVfd::default();
886 vfd.guest_shared_memory = Some(shm);
887 vfd.slot = Some((offset, vm));
888 Ok(vfd)
889 } else if is_fence(&descriptor) {
890 let mut vfd = WlVfd::default();
891 vfd.is_fence = true;
892 vfd.fence = Some(descriptor);
893 Ok(vfd)
894 } else {
895 let flags = match FileFlags::from_file(&descriptor) {
896 Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
897 Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
898 Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
899 _ => 0,
900 };
901 let mut vfd = WlVfd::default();
902 vfd.local_pipe = Some((flags, descriptor));
903 Ok(vfd)
904 }
905 }
906
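// Computes the VIRTIO_WL_VFD_* flags reported to the guest for this vfd; the reported set depends
// on whether transition flags were negotiated.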
907 fn flags(&self, use_transition_flags: bool) -> u32 {
908 let mut flags = 0;
909 if use_transition_flags {
910 if self.socket.is_some() {
911 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
912 }
913 if let Some((f, _)) = self.local_pipe {
914 flags |= f;
915 }
916 if self.is_fence {
917 flags |= VIRTIO_WL_VFD_FENCE;
918 }
919 } else {
920 if self.socket.is_some() {
921 flags |= VIRTIO_WL_VFD_CONTROL;
922 }
923 if self.slot.is_some() {
924 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
925 }
926 }
927 flags
928 }
929
930 // Offset within the shared memory region this VFD was mapped at.
931 fn offset(&self) -> Option<u64> {
932 self.slot.as_ref().map(|s| s.0)
933 }
934
935 // Size in bytes of the shared memory VFD.
936 fn size(&self) -> Option<u64> {
937 self.guest_shared_memory.as_ref().map(|shm| shm.size())
938 }
939
940 // The descriptor that gets sent if this VFD is sent over a socket.
941 fn send_descriptor(&self) -> Option<RawDescriptor> {
942 self.guest_shared_memory
943 .as_ref()
944 .map(|shm| shm.as_raw_descriptor())
945 .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
946 .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
947 .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
948 }
949
950 // The FD that is used for polling for events on this VFD.
951 fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
952 self.socket
953 .as_ref()
954 .map(|s| s as &dyn AsRawDescriptor)
955 .or_else(|| {
956 self.local_pipe
957 .as_ref()
958 .map(|(_, p)| p as &dyn AsRawDescriptor)
959 })
960 .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
961 }
962
963 // Sends data/files from the guest to the host over this VFD.
964 fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
965 if let Some(socket) = &self.socket {
966 socket
967 .send_with_fds(&data.get_remaining(), rds)
968 .map_err(WlError::SendVfd)?;
969 // All remaining data in `data` is now considered consumed.
970 data.consume(::std::usize::MAX);
971 Ok(WlResp::Ok)
972 } else if let Some((_, local_pipe)) = &mut self.local_pipe {
973 // Impossible to send descriptors over a simple pipe.
974 if !rds.is_empty() {
975 return Ok(WlResp::InvalidType);
976 }
977 data.read_to(local_pipe, usize::max_value())
978 .map_err(WlError::WritePipe)?;
979 Ok(WlResp::Ok)
980 } else {
981 Ok(WlResp::InvalidType)
982 }
983 }
984
985 // Receives data/files from the host for this VFD and queues it for the guest.
986 fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
987 if let Some(socket) = self.socket.take() {
988 let mut buf = vec![0; IN_BUFFER_LEN];
989 let mut fd_buf = [0; VIRTWL_SEND_MAX_ALLOCS];
990 // If any errors happen, the socket will get dropped, preventing more reading.
991 let (len, file_count) = socket
992 .recv_with_fds(IoSliceMut::new(&mut buf), &mut fd_buf)
993 .map_err(WlError::RecvVfd)?;
994 // If any data gets read, then put the socket back for future recv operations.
995 if len != 0 || file_count != 0 {
996 buf.truncate(len);
997 buf.shrink_to_fit();
998 self.socket = Some(socket);
999 // Safe because the first file_count fds from recv_with_fds are owned by us and
1000 // valid.
1001 in_file_queue.extend(
1002 fd_buf[..file_count]
1003 .iter()
1004 .map(|&descriptor| unsafe { File::from_raw_descriptor(descriptor) }),
1005 );
1006 return Ok(buf);
1007 }
1008 Ok(Vec::new())
1009 } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
1010 let mut buf = Vec::new();
1011 buf.resize(IN_BUFFER_LEN, 0);
1012 let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
1013 if len != 0 {
1014 buf.truncate(len);
1015 buf.shrink_to_fit();
1016 self.local_pipe = Some((flags, local_pipe));
1017 return Ok(buf);
1018 }
1019 Ok(Vec::new())
1020 } else {
1021 Ok(Vec::new())
1022 }
1023 }
1024
1025 // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
1026 // up events.
1027 fn close_remote(&mut self) {
1028 self.remote_pipe = None;
1029 }
1030
1031 fn close(&mut self) -> WlResult<()> {
1032 if let Some((offset, vm)) = self.slot.take() {
1033 vm.unregister_memory(offset)?;
1034 }
1035 self.socket = None;
1036 self.remote_pipe = None;
1037 self.local_pipe = None;
1038 Ok(())
1039 }
1040 }
1041
1042 impl Drop for WlVfd {
1043 fn drop(&mut self) {
1044 let _ = self.close();
1045 }
1046 }
1047
1048 #[derive(Debug)]
1049 enum WlRecv {
1050 Vfd { id: u32 },
1051 Data { buf: Vec<u8> },
1052 Hup,
1053 }
1054
1055 pub struct WlState {
1056 wayland_paths: Map<String, PathBuf>,
1057 vm: VmRequester,
1058 resource_bridge: Option<Tube>,
1059 use_transition_flags: bool,
1060 wait_ctx: WaitContext<u32>,
1061 vfds: Map<u32, WlVfd>,
1062 next_vfd_id: u32,
1063 in_file_queue: Vec<File>,
1064 in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
1065 current_recv_vfd: Option<u32>,
1066 recv_vfds: Vec<u32>,
1067 #[cfg(feature = "gpu")]
1068 signaled_fence: Option<SafeDescriptor>,
1069 use_send_vfd_v2: bool,
1070 address_offset: Option<u64>,
1071 }
1072
1073 impl WlState {
1074 /// Create a new `WlState` instance for running a virtio-wl device.
1075 pub fn new(
1076 wayland_paths: Map<String, PathBuf>,
1077 mapper: Box<dyn SharedMemoryMapper>,
1078 use_transition_flags: bool,
1079 use_send_vfd_v2: bool,
1080 resource_bridge: Option<Tube>,
1081 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1082 address_offset: Option<u64>,
1083 ) -> WlState {
1084 WlState {
1085 wayland_paths,
1086 vm: VmRequester::new(
1087 mapper,
1088 #[cfg(feature = "minigbm")]
1089 gralloc,
1090 ),
1091 resource_bridge,
1092 wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
1093 use_transition_flags,
1094 vfds: Map::new(),
1095 next_vfd_id: NEXT_VFD_ID_BASE,
1096 in_file_queue: Vec::new(),
1097 in_queue: VecDeque::new(),
1098 current_recv_vfd: None,
1099 recv_vfds: Vec::new(),
1100 #[cfg(feature = "gpu")]
1101 signaled_fence: None,
1102 use_send_vfd_v2,
1103 address_offset,
1104 }
1105 }
1106
1107 /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
1108 /// long-term solution is to replace the WaitContext completely by spawning async workers
1109 /// instead.
1110 pub fn wait_ctx(&self) -> &WaitContext<u32> {
1111 &self.wait_ctx
1112 }
1113
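// Handles VIRTIO_WL_CMD_VFD_NEW_PIPE: creates a host pipe for guest id `id`, with the direction
// chosen by `flags` (exactly one of read or write).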
1114 fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
1115 if id & VFD_ID_HOST_MASK != 0 {
1116 return Ok(WlResp::InvalidId);
1117 }
1118
1119 if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
1120 return Ok(WlResp::InvalidFlags);
1121 }
1122
1123 if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
1124 return Ok(WlResp::InvalidFlags);
1125 }
1126
1127 match self.vfds.entry(id) {
1128 Entry::Vacant(entry) => {
1129 let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
1130 WlVfd::pipe_remote_read_local_write()?
1131 } else if flags & VIRTIO_WL_VFD_READ != 0 {
1132 WlVfd::pipe_remote_write_local_read()?
1133 } else {
1134 return Ok(WlResp::InvalidFlags);
1135 };
1136 self.wait_ctx
1137 .add(vfd.wait_descriptor().unwrap(), id)
1138 .map_err(WlError::WaitContextAdd)?;
1139 let resp = WlResp::VfdNew {
1140 id,
1141 flags: 0,
1142 pfn: 0,
1143 size: 0,
1144 resp: true,
1145 };
1146 entry.insert(vfd);
1147 Ok(resp)
1148 }
1149 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1150 }
1151 }
1152
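// Handles VIRTIO_WL_CMD_VFD_NEW: creates a shared memory vfd of at least `size` bytes (rounded up
// to a page) and maps it into the device's shared memory region.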
1153 fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
1154 if id & VFD_ID_HOST_MASK != 0 {
1155 return Ok(WlResp::InvalidId);
1156 }
1157
1158 if self.use_transition_flags {
1159 if flags != 0 {
1160 return Ok(WlResp::InvalidFlags);
1161 }
1162 } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
1163 return Ok(WlResp::Err(Box::from("invalid flags")));
1164 }
1165
1166 if self.vfds.contains_key(&id) {
1167 return Ok(WlResp::InvalidId);
1168 }
1169 let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
1170 let resp = WlResp::VfdNew {
1171 id,
1172 flags,
1173 pfn: self.compute_pfn(&vfd.offset()),
1174 size: vfd.size().unwrap_or_default() as u32,
1175 resp: true,
1176 };
1177 self.vfds.insert(id, vfd);
1178 Ok(resp)
1179 }
1180
1181 #[cfg(feature = "minigbm")]
1182 fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
1183 if id & VFD_ID_HOST_MASK != 0 {
1184 return Ok(WlResp::InvalidId);
1185 }
1186
1187 if self.vfds.contains_key(&id) {
1188 return Ok(WlResp::InvalidId);
1189 }
1190 let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
1191 let resp = WlResp::VfdNewDmabuf {
1192 id,
1193 flags: 0,
1194 pfn: self.compute_pfn(&vfd.offset()),
1195 size: vfd.size().unwrap_or_default() as u32,
1196 desc,
1197 };
1198 self.vfds.insert(id, vfd);
1199 Ok(resp)
1200 }
1201
1202 #[cfg(feature = "minigbm")]
1203 fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
1204 if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
1205 return Ok(WlResp::InvalidFlags);
1206 }
1207
1208 match self.vfds.get_mut(&vfd_id) {
1209 Some(vfd) => {
1210 vfd.dmabuf_sync(flags)?;
1211 Ok(WlResp::Ok)
1212 }
1213 None => Ok(WlResp::InvalidId),
1214 }
1215 }
1216
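// Handles VIRTIO_WL_CMD_VFD_NEW_CTX and VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: connects a new vfd
// socket to the wayland path registered under `name`.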
1217 fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
1218 if id & VFD_ID_HOST_MASK != 0 {
1219 return Ok(WlResp::InvalidId);
1220 }
1221
1222 let flags = if self.use_transition_flags {
1223 VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
1224 } else {
1225 VIRTIO_WL_VFD_CONTROL
1226 };
1227
1228 match self.vfds.entry(id) {
1229 Entry::Vacant(entry) => {
1230 let vfd = entry.insert(WlVfd::connect(
1231 self.wayland_paths
1232 .get(name)
1233 .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
1234 )?);
1235 self.wait_ctx
1236 .add(vfd.wait_descriptor().unwrap(), id)
1237 .map_err(WlError::WaitContextAdd)?;
1238 Ok(WlResp::VfdNew {
1239 id,
1240 flags,
1241 pfn: 0,
1242 size: 0,
1243 resp: true,
1244 })
1245 }
1246 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1247 }
1248 }
1249
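// Polls the vfd wait context without blocking, receiving from readable vfds and queueing hang-up
// events for vfds whose peer has closed.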
1250 fn process_wait_context(&mut self) {
1251 let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
1252 Ok(v) => v,
1253 Err(e) => {
1254 error!("failed waiting for vfd events: {}", e);
1255 return;
1256 }
1257 };
1258
1259 for event in events.iter().filter(|e| e.is_readable) {
1260 if let Err(e) = self.recv(event.token) {
1261 error!("failed to recv from vfd: {}", e)
1262 }
1263 }
1264
1265 for event in events.iter().filter(|e| e.is_hungup) {
1266 if !event.is_readable {
1267 let vfd_id = event.token;
1268 if let Some(descriptor) =
1269 self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
1270 {
1271 if let Err(e) = self.wait_ctx.delete(descriptor) {
1272 warn!("failed to remove hungup vfd from poll context: {}", e);
1273 }
1274 }
1275 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1276 }
1277 }
1278 }
1279
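// Handles VIRTIO_WL_CMD_VFD_CLOSE: closes `vfd_id` along with any undelivered vfds still queued
// for it.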
1280 fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
1281 let mut to_delete = Set::new();
1282 for (dest_vfd_id, q) in &self.in_queue {
1283 if *dest_vfd_id == vfd_id {
1284 if let WlRecv::Vfd { id } = q {
1285 to_delete.insert(*id);
1286 }
1287 }
1288 }
1289 for vfd_id in to_delete {
1290 // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
1291 let _ = self.close(vfd_id);
1292 }
1293 match self.vfds.remove(&vfd_id) {
1294 Some(mut vfd) => {
1295 self.in_queue.retain(|&(id, _)| id != vfd_id);
1296 vfd.close()?;
1297 Ok(WlResp::Ok)
1298 }
1299 None => Ok(WlResp::InvalidId),
1300 }
1301 }
1302
1303 #[cfg(feature = "gpu")]
1304 fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
1305 let sock = self.resource_bridge.as_ref().unwrap();
1306 match get_resource_info(sock, request) {
1307 Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
1308 Ok(ResourceInfo::Fence { handle }) => Some(handle),
1309 Err(ResourceBridgeError::InvalidResource(req)) => {
1310 warn!("attempt to send non-existent gpu resource {}", req);
1311 None
1312 }
1313 Err(e) => {
1314 error!("{}", e);
1315 // If there was an error with the resource bridge, it can no longer be
1316 // trusted to continue to function.
1317 self.resource_bridge = None;
1318 None
1319 }
1320 }
1321 }
1322
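// Handles VIRTIO_WL_CMD_VFD_SEND and VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: resolves the referenced
// vfd ids (or foreign gpu resources) into descriptors and sends them, along with the remaining
// payload, to the vfd identified by `vfd_id`.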
1323 fn send(
1324 &mut self,
1325 vfd_id: u32,
1326 vfd_count: usize,
1327 foreign_id: bool,
1328 reader: &mut Reader,
1329 ) -> WlResult<WlResp> {
1330 // First stage gathers and normalizes all id information from guest memory.
1331 let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
1332 kind: Le32::from(0),
1333 payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
1334 }; VIRTWL_SEND_MAX_ALLOCS];
1335 for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
1336 *vfd_id = if foreign_id {
1337 if self.use_send_vfd_v2 {
1338 reader.read_obj().map_err(WlError::ParseDesc)?
1339 } else {
1340 let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
1341 CtrlVfdSendVfdV2 {
1342 kind: vfd.kind,
1343 payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
1344 }
1345 }
1346 } else {
1347 CtrlVfdSendVfdV2 {
1348 kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
1349 payload: CtrlVfdSendVfdV2Payload {
1350 id: reader.read_obj().map_err(WlError::ParseDesc)?,
1351 },
1352 }
1353 };
1354 }
1355
1356 // Next stage collects corresponding file descriptors for each id.
1357 let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
1358 #[cfg(feature = "gpu")]
1359 let mut bridged_files = Vec::new();
1360 for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
1361 match send_vfd_id.kind.to_native() {
1362 VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
1363 match self.vfds.get(&send_vfd_id.id().to_native()) {
1364 Some(vfd) => match vfd.send_descriptor() {
1365 Some(vfd_fd) => *descriptor = vfd_fd,
1366 None => return Ok(WlResp::InvalidType),
1367 },
1368 None => {
1369 warn!(
1370 "attempt to send non-existent vfd 0x{:08x}",
1371 send_vfd_id.id().to_native()
1372 );
1373 return Ok(WlResp::InvalidId);
1374 }
1375 }
1376 }
1377 #[cfg(feature = "gpu")]
1378 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
1379 match self.get_info(ResourceRequest::GetBuffer {
1380 id: send_vfd_id.id().to_native(),
1381 }) {
1382 Some(handle) => {
1383 *descriptor = handle.as_raw_descriptor();
1384 bridged_files.push(handle.into());
1385 }
1386 None => return Ok(WlResp::InvalidId),
1387 }
1388 }
1389 #[cfg(feature = "gpu")]
1390 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
1391 match self.get_info(ResourceRequest::GetFence {
1392 seqno: send_vfd_id.seqno().to_native(),
1393 }) {
1394 Some(handle) => {
1395 *descriptor = handle.as_raw_descriptor();
1396 bridged_files.push(handle.into());
1397 }
1398 None => return Ok(WlResp::InvalidId),
1399 }
1400 }
1401 #[cfg(feature = "gpu")]
1402 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
1403 if self.resource_bridge.is_some() =>
1404 {
1405 if self.signaled_fence.is_none() {
1406 // If the guest is sending a signaled fence, we know a fence
1407 // with seqno 0 must already be signaled.
1408 match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
1409 Some(handle) => self.signaled_fence = Some(handle),
1410 None => return Ok(WlResp::InvalidId),
1411 }
1412 }
1413 match self.signaled_fence.as_ref().unwrap().try_clone() {
1414 Ok(dup) => {
1415 *descriptor = dup.into_raw_descriptor();
1416 // Safe because the fd comes from a valid SafeDescriptor.
1417 let file = unsafe { File::from_raw_descriptor(*descriptor) };
1418 bridged_files.push(file);
1419 }
1420 Err(_) => return Ok(WlResp::InvalidId),
1421 }
1422 }
1423 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
1424 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
1425 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
1426 let _ = self.resource_bridge.as_ref();
1427 warn!("attempt to send foreign resource kind but feature is disabled");
1428 }
1429 kind => {
1430 warn!("attempt to send unknown foreign resource kind: {}", kind);
1431 return Ok(WlResp::InvalidId);
1432 }
1433 }
1434 }
1435
1436 // Final stage sends file descriptors and data to the target vfd's socket.
1437 match self.vfds.get_mut(&vfd_id) {
1438 Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
1439 WlResp::Ok => {}
1440 _ => return Ok(WlResp::InvalidType),
1441 },
1442 None => return Ok(WlResp::InvalidId),
1443 }
1444 // The vfds with remote FDs need to be closed so that the local side can receive
1445 // hangup events.
1446 for &send_vfd_id in &send_vfd_ids[..vfd_count] {
1447 if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
1448 if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
1449 vfd.close_remote();
1450 }
1451 }
1452 }
1453 Ok(WlResp::Ok)
1454 }
1455
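// Receives pending data and files from `vfd_id`; incoming files become new host-side vfds and the
// results are queued for delivery on the `in` queue.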
1456 fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
1457 let buf = match self.vfds.get_mut(&vfd_id) {
1458 Some(vfd) => {
1459 if vfd.is_fence {
1460 if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
1461 warn!("failed to remove hungup vfd from poll context: {}", e);
1462 }
1463 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1464 return Ok(());
1465 } else {
1466 vfd.recv(&mut self.in_file_queue)?
1467 }
1468 }
1469 None => return Ok(()),
1470 };
1471
1472 if self.in_file_queue.is_empty() && buf.is_empty() {
1473 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1474 return Ok(());
1475 }
1476 for file in self.in_file_queue.drain(..) {
1477 let vfd = WlVfd::from_file(self.vm.clone(), file)?;
1478 if let Some(wait_descriptor) = vfd.wait_descriptor() {
1479 self.wait_ctx
1480 .add(wait_descriptor, self.next_vfd_id)
1481 .map_err(WlError::WaitContextAdd)?;
1482 }
1483 self.vfds.insert(self.next_vfd_id, vfd);
1484 self.in_queue.push_back((
1485 vfd_id,
1486 WlRecv::Vfd {
1487 id: self.next_vfd_id,
1488 },
1489 ));
1490 self.next_vfd_id += 1;
1491 }
1492 self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
1493
1494 Ok(())
1495 }
1496
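// Decodes one control message from the `out` queue and dispatches it to the handler for its
// command type.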
1497 fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
1498 let type_ = {
1499 let mut type_reader = reader.clone();
1500 type_reader.read_obj::<Le32>().map_err(WlError::ParseDesc)?
1501 };
1502 match type_.into() {
1503 VIRTIO_WL_CMD_VFD_NEW => {
1504 let ctrl = reader
1505 .read_obj::<CtrlVfdNew>()
1506 .map_err(WlError::ParseDesc)?;
1507 self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
1508 }
1509 VIRTIO_WL_CMD_VFD_CLOSE => {
1510 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1511 self.close(ctrl.id.into())
1512 }
1513 VIRTIO_WL_CMD_VFD_SEND => {
1514 let ctrl = reader
1515 .read_obj::<CtrlVfdSend>()
1516 .map_err(WlError::ParseDesc)?;
1517 let foreign_id = false;
1518 self.send(
1519 ctrl.id.into(),
1520 ctrl.vfd_count.to_native() as usize,
1521 foreign_id,
1522 reader,
1523 )
1524 }
1525 #[cfg(feature = "gpu")]
1526 VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
1527 let ctrl = reader
1528 .read_obj::<CtrlVfdSend>()
1529 .map_err(WlError::ParseDesc)?;
1530 let foreign_id = true;
1531 self.send(
1532 ctrl.id.into(),
1533 ctrl.vfd_count.to_native() as usize,
1534 foreign_id,
1535 reader,
1536 )
1537 }
1538 VIRTIO_WL_CMD_VFD_NEW_CTX => {
1539 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1540 self.new_context(ctrl.id.into(), "")
1541 }
1542 VIRTIO_WL_CMD_VFD_NEW_PIPE => {
1543 let ctrl = reader
1544 .read_obj::<CtrlVfdNew>()
1545 .map_err(WlError::ParseDesc)?;
1546 self.new_pipe(ctrl.id.into(), ctrl.flags.into())
1547 }
1548 #[cfg(feature = "minigbm")]
1549 VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
1550 let ctrl = reader
1551 .read_obj::<CtrlVfdNewDmabuf>()
1552 .map_err(WlError::ParseDesc)?;
1553 self.new_dmabuf(
1554 ctrl.id.into(),
1555 ctrl.width.into(),
1556 ctrl.height.into(),
1557 ctrl.format.into(),
1558 )
1559 }
1560 #[cfg(feature = "minigbm")]
1561 VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
1562 let ctrl = reader
1563 .read_obj::<CtrlVfdDmabufSync>()
1564 .map_err(WlError::ParseDesc)?;
1565 self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
1566 }
1567 VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
1568 let ctrl = reader
1569 .read_obj::<CtrlVfdNewCtxNamed>()
1570 .map_err(WlError::ParseDesc)?;
1571 let name_len = ctrl
1572 .name
1573 .iter()
1574 .position(|x| x == &0)
1575 .unwrap_or(ctrl.name.len());
1576 let name =
1577 std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
1578 self.new_context(ctrl.id.into(), name)
1579 }
1580 op_type => {
1581 warn!("unexpected command {}", op_type);
1582 Ok(WlResp::InvalidCommand)
1583 }
1584 }
1585 }
1586
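// Builds the next message destined for the `in` queue from the front of `in_queue` without
// consuming it.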
1587 fn next_recv(&self) -> Option<WlResp> {
1588 if let Some(q) = self.in_queue.front() {
1589 match *q {
1590 (vfd_id, WlRecv::Vfd { id }) => {
1591 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1592 match self.vfds.get(&id) {
1593 Some(vfd) => Some(WlResp::VfdNew {
1594 id,
1595 flags: vfd.flags(self.use_transition_flags),
1596 pfn: self.compute_pfn(&vfd.offset()),
1597 size: vfd.size().unwrap_or_default() as u32,
1598 resp: false,
1599 }),
1600 _ => Some(WlResp::VfdNew {
1601 id,
1602 flags: 0,
1603 pfn: 0,
1604 size: 0,
1605 resp: false,
1606 }),
1607 }
1608 } else {
1609 Some(WlResp::VfdRecv {
1610 id: self.current_recv_vfd.unwrap(),
1611 data: &[],
1612 vfds: &self.recv_vfds[..],
1613 })
1614 }
1615 }
1616 (vfd_id, WlRecv::Data { ref buf }) => {
1617 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1618 Some(WlResp::VfdRecv {
1619 id: vfd_id,
1620 data: &buf[..],
1621 vfds: &self.recv_vfds[..],
1622 })
1623 } else {
1624 Some(WlResp::VfdRecv {
1625 id: self.current_recv_vfd.unwrap(),
1626 data: &[],
1627 vfds: &self.recv_vfds[..],
1628 })
1629 }
1630 }
1631 (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
1632 }
1633 } else {
1634 None
1635 }
1636 }
1637
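// Advances past the front of `in_queue` once next_recv()'s message has been delivered, tracking
// which vfd ids are batched into the current recv and resetting the batch at boundaries.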
1638 fn pop_recv(&mut self) {
1639 if let Some(q) = self.in_queue.front() {
1640 match *q {
1641 (vfd_id, WlRecv::Vfd { id }) => {
1642 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1643 self.recv_vfds.push(id);
1644 self.current_recv_vfd = Some(vfd_id);
1645 } else {
1646 self.recv_vfds.clear();
1647 self.current_recv_vfd = None;
1648 return;
1649 }
1650 }
1651 (vfd_id, WlRecv::Data { .. }) => {
1652 self.recv_vfds.clear();
1653 self.current_recv_vfd = None;
1654 if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
1655 return;
1656 }
1657 }
1658 (_, WlRecv::Hup) => {
1659 self.recv_vfds.clear();
1660 self.current_recv_vfd = None;
1661 }
1662 }
1663 }
1664 self.in_queue.pop_front();
1665 }
1666
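// Converts an optional shared memory offset into the page frame number reported to the guest,
// adding `address_offset` when one is configured.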
1667 fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
1668 let addr = match (offset, self.address_offset) {
1669 (Some(o), Some(address_offset)) => o + address_offset,
1670 (Some(o), None) => *o,
1671 // without shmem, 0 is the special address for "no_pfn"
1672 (None, Some(_)) => 0,
1673 // with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
1674 (None, None) => WL_SHMEM_SIZE,
1675 };
1676 addr >> VIRTIO_WL_PFN_SHIFT
1677 }
1678 }
1679
1680 #[derive(ThisError, Debug, PartialEq, Eq)]
1681 #[error("no descriptors available in queue")]
1682 pub struct DescriptorsExhausted;
1683
1684 /// Handle incoming events and forward them to the VM over the input queue.
1685 pub fn process_in_queue<I: SignalableInterrupt>(
1686 interrupt: &I,
1687 in_queue: &mut Queue,
1688 mem: &GuestMemory,
1689 state: &mut WlState,
1690 ) -> ::std::result::Result<(), DescriptorsExhausted> {
1691 state.process_wait_context();
1692
1693 let mut needs_interrupt = false;
1694 let mut exhausted_queue = false;
1695 loop {
1696 let desc = if let Some(d) = in_queue.peek(mem) {
1697 d
1698 } else {
1699 exhausted_queue = true;
1700 break;
1701 };
1702
1703 let index = desc.index;
1704 let mut should_pop = false;
1705 if let Some(in_resp) = state.next_recv() {
1706 let bytes_written = match Writer::new(mem.clone(), desc) {
1707 Ok(mut writer) => {
1708 match encode_resp(&mut writer, in_resp) {
1709 Ok(()) => {
1710 should_pop = true;
1711 }
1712 Err(e) => {
1713 error!("failed to encode response to descriptor chain: {}", e);
1714 }
1715 };
1716 writer.bytes_written() as u32
1717 }
1718 Err(e) => {
1719 error!("invalid descriptor: {}", e);
1720 0
1721 }
1722 };
1723
1724 needs_interrupt = true;
1725 in_queue.pop_peeked(mem);
1726 in_queue.add_used(mem, index, bytes_written);
1727 } else {
1728 break;
1729 }
1730 if should_pop {
1731 state.pop_recv();
1732 }
1733 }
1734
1735 if needs_interrupt {
1736 in_queue.trigger_interrupt(mem, interrupt);
1737 }
1738
1739 if exhausted_queue {
1740 Err(DescriptorsExhausted)
1741 } else {
1742 Ok(())
1743 }
1744 }
1745
1746 /// Handle messages from the output queue and forward them to the display server, if necessary.
1747 pub fn process_out_queue<I: SignalableInterrupt>(
1748 interrupt: &I,
1749 out_queue: &mut Queue,
1750 mem: &GuestMemory,
1751 state: &mut WlState,
1752 ) {
1753 let mut needs_interrupt = false;
1754 while let Some(desc) = out_queue.pop(mem) {
1755 let desc_index = desc.index;
1756 match (
1757 Reader::new(mem.clone(), desc.clone()),
1758 Writer::new(mem.clone(), desc),
1759 ) {
1760 (Ok(mut reader), Ok(mut writer)) => {
1761 let resp = match state.execute(&mut reader) {
1762 Ok(r) => r,
1763 Err(e) => WlResp::Err(Box::new(e)),
1764 };
1765
1766 match encode_resp(&mut writer, resp) {
1767 Ok(()) => {}
1768 Err(e) => {
1769 error!("failed to encode response to descriptor chain: {}", e);
1770 }
1771 }
1772
1773 out_queue.add_used(mem, desc_index, writer.bytes_written() as u32);
1774 needs_interrupt = true;
1775 }
1776 (_, Err(e)) | (Err(e), _) => {
1777 error!("invalid descriptor: {}", e);
1778 out_queue.add_used(mem, desc_index, 0);
1779 needs_interrupt = true;
1780 }
1781 }
1782 }
1783
1784 if needs_interrupt {
1785 out_queue.trigger_interrupt(mem, interrupt);
1786 }
1787 }
1788
1789 struct Worker {
1790 interrupt: Interrupt,
1791 mem: GuestMemory,
1792 in_queue: Queue,
1793 in_queue_evt: Event,
1794 out_queue: Queue,
1795 out_queue_evt: Event,
1796 state: WlState,
1797 }
1798
1799 impl Worker {
1800 fn new(
1801 mem: GuestMemory,
1802 interrupt: Interrupt,
1803 in_queue: (Queue, Event),
1804 out_queue: (Queue, Event),
1805 wayland_paths: Map<String, PathBuf>,
1806 mapper: Box<dyn SharedMemoryMapper>,
1807 use_transition_flags: bool,
1808 use_send_vfd_v2: bool,
1809 resource_bridge: Option<Tube>,
1810 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1811 address_offset: Option<u64>,
1812 ) -> Worker {
1813 Worker {
1814 interrupt,
1815 mem,
1816 in_queue: in_queue.0,
1817 in_queue_evt: in_queue.1,
1818 out_queue: out_queue.0,
1819 out_queue_evt: out_queue.1,
1820 state: WlState::new(
1821 wayland_paths,
1822 mapper,
1823 use_transition_flags,
1824 use_send_vfd_v2,
1825 resource_bridge,
1826 #[cfg(feature = "minigbm")]
1827 gralloc,
1828 address_offset,
1829 ),
1830 }
1831 }
1832
    fn run(mut self, kill_evt: Event) -> anyhow::Result<VirtioDeviceSaved> {
        #[derive(EventToken)]
        enum Token {
            InQueue,
            OutQueue,
            Kill,
            State,
            InterruptResample,
        }

        let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
            (&self.in_queue_evt, Token::InQueue),
            (&self.out_queue_evt, Token::OutQueue),
            (&kill_evt, Token::Kill),
            (&self.state.wait_ctx, Token::State),
        ])
        .context("failed creating WaitContext")?;

        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            wait_ctx
                .add(resample_evt, Token::InterruptResample)
                .context("failed adding resample event to WaitContext.")?;
        }

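        // `state.wait_ctx` is only watched while there are `in` queue descriptors left to
        // fill; it is re-armed once the guest signals that more descriptors are available.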
        let mut watching_state_ctx = true;
        'wait: loop {
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed waiting for events: {}", e);
                    break;
                }
            };

            for event in &events {
                match event.token {
                    Token::InQueue => {
                        let _ = self.in_queue_evt.wait();
                        if !watching_state_ctx {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
                            {
                                error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
                                break;
                            }
                            watching_state_ctx = true;
                        }
                    }
                    Token::OutQueue => {
                        let _ = self.out_queue_evt.wait();
                        process_out_queue(
                            &self.interrupt,
                            &mut self.out_queue,
                            &self.mem,
                            &mut self.state,
                        );
                    }
                    Token::Kill => break 'wait,
                    Token::State => {
                        if let Err(DescriptorsExhausted) = process_in_queue(
                            &self.interrupt,
                            &mut self.in_queue,
                            &self.mem,
                            &mut self.state,
                        ) {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
                            {
                                error!(
                                    "Failed to stop watching wait_ctx descriptor for WlState: {}",
                                    e
                                );
                                break;
                            }
                            watching_state_ctx = false;
                        }
                    }
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                }
            }
        }

        Ok(VirtioDeviceSaved {
            queues: vec![self.in_queue, self.out_queue],
        })
    }
}

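/// Device-level state for the virtio wayland device; the configuration collected here is
/// handed off to a `Worker` when the device is activated.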
pub struct Wl {
    worker_thread: Option<WorkerThread<anyhow::Result<VirtioDeviceSaved>>>,
    wayland_paths: Map<String, PathBuf>,
    mapper: Option<Box<dyn SharedMemoryMapper>>,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    use_send_vfd_v2: bool,
    use_shmem: bool,
    base_features: u64,
    #[cfg(feature = "minigbm")]
    gralloc: Option<RutabagaGralloc>,
    address_offset: Option<u64>,
}

impl Wl {
    pub fn new(
        base_features: u64,
        wayland_paths: Map<String, PathBuf>,
        resource_bridge: Option<Tube>,
    ) -> Result<Wl> {
        Ok(Wl {
            worker_thread: None,
            wayland_paths,
            mapper: None,
            resource_bridge,
            use_transition_flags: false,
            use_send_vfd_v2: false,
            use_shmem: false,
            base_features,
            #[cfg(feature = "minigbm")]
            gralloc: None,
            address_offset: None,
        })
    }
}

impl VirtioDevice for Wl {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        if let Some(mapper) = &self.mapper {
            if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
                keep_rds.push(raw_descriptor);
            }
        }
        if let Some(resource_bridge) = &self.resource_bridge {
            keep_rds.push(resource_bridge.as_raw_descriptor());
        }
        keep_rds
    }

    #[cfg(feature = "minigbm")]
    fn on_device_sandboxed(&mut self) {
        // Gralloc initialization can cause some GPU drivers to create their own threads
        // and that must be done after sandboxing.
        match RutabagaGralloc::new() {
            Ok(g) => self.gralloc = Some(g),
            Err(e) => {
                error!("failed to initialize gralloc {:?}", e);
            }
        };
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Wl
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.base_features
            | 1 << VIRTIO_WL_F_TRANS_FLAGS
            | 1 << VIRTIO_WL_F_SEND_FENCES
            | 1 << VIRTIO_WL_F_USE_SHMEM
    }

    fn ack_features(&mut self, value: u64) {
        if value & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0 {
            self.use_transition_flags = true;
        }
        if value & (1 << VIRTIO_WL_F_SEND_FENCES) != 0 {
            self.use_send_vfd_v2 = true;
        }
        if value & (1 << VIRTIO_WL_F_USE_SHMEM) != 0 {
            self.use_shmem = true;
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<(Queue, Event)>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

        let mapper = self.mapper.take().context("missing mapper")?;

        let wayland_paths = self.wayland_paths.clone();
        let use_transition_flags = self.use_transition_flags;
        let use_send_vfd_v2 = self.use_send_vfd_v2;
        let resource_bridge = self.resource_bridge.take();
        #[cfg(feature = "minigbm")]
        let gralloc = self
            .gralloc
            .take()
            .expect("gralloc already passed to worker");
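        // The shared memory base is only applied as an address offset when the guest did
        // not negotiate VIRTIO_WL_F_USE_SHMEM.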
        let address_offset = if !self.use_shmem {
            self.address_offset
        } else {
            None
        };

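        // Hand everything off to the worker thread; the first queue is the `in` queue and
        // the second is the `out` queue.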
        self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
            Worker::new(
                mem,
                interrupt,
                queues.remove(0),
                queues.remove(0),
                wayland_paths,
                mapper,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
                #[cfg(feature = "minigbm")]
                gralloc,
                address_offset,
            )
            .run(kill_evt)
        }));

        Ok(())
    }

    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: WL_SHMEM_ID,
            length: WL_SHMEM_SIZE,
        })
    }

    fn set_shared_memory_region_base(&mut self, shmem_base: GuestAddress) {
        self.address_offset = Some(shmem_base.0);
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper = Some(mapper);
    }

    fn stop(&mut self) -> std::result::Result<Option<VirtioDeviceSaved>, VirtioError> {
        if let Some(worker_thread) = self.worker_thread.take() {
            let state = worker_thread.stop().map_err(VirtioError::InThreadFailure)?;
            return Ok(Some(state));
        }
        Ok(None)
    }
}

impl Suspendable for Wl {}