// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! This module implements the virtio wayland device used by the guest to access the host's
//! wayland server.
//!
//! The virtio wayland protocol is carried over two queues: `in` and `out`. The `in` queue is used
//! for sending commands to the guest that are generated by the host, usually messages from the
//! wayland server. The `out` queue is for commands from the guest, usually requests to allocate
//! shared memory, open a wayland server connection, or send data over an existing connection.
//!
//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
//! Virtual file descriptors contain actual file descriptors, either a shared memory file
//! descriptor or a unix domain socket to the wayland server. In the shared memory case, there is
//! also an associated slot that indicates which hypervisor memory slot the memory is installed
//! into, as well as a page frame number that the guest can access the memory from.
//!
//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
//! `WlResp` for responses.
//!
//! There is one `WlState` instance that contains every known vfd and the current state of the
//! `in` queue. The `in` queue requires extra state to buffer messages to the guest in case the
//! `in` queue is already full. The `WlState` also has a control socket necessary to fulfill
//! certain requests, such as those registering guest memory.
//!
//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the
//! kill event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's
//! socket.
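//!
//! As a rough illustration (not an exhaustive trace), a guest request to open a named wayland
//! connection travels through the device like this:
//!
//! ```text
//! guest driver                          out queue               host (WlState)
//! CtrlVfdNewCtxNamed { id, name, .. }   ------->  execute() -> new_context() -> WlVfd::connect()
//!                                                               wait_ctx.add(socket, id)
//! CtrlVfdNew (VIRTIO_WL_RESP_VFD_NEW)   <-------  encode_vfd_new(resp = true)
//! ```
//!
//! Traffic that later arrives on that socket is picked up by the worker's poll loop, buffered as
//! `WlRecv` entries, and drained into `in` queue buffers as `VIRTIO_WL_CMD_VFD_RECV` messages.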
30
31 use std::cell::RefCell;
32 use std::collections::btree_map::Entry;
33 use std::collections::BTreeMap;
34 use std::collections::BTreeSet;
35 use std::collections::VecDeque;
36 use std::convert::From;
37 use std::error::Error as StdError;
38 use std::fmt;
39 use std::fs::File;
40 use std::io;
41 use std::io::Read;
42 use std::io::Seek;
43 use std::io::SeekFrom;
44 use std::io::Write;
45 use std::mem::size_of;
46 #[cfg(feature = "minigbm")]
47 use std::os::raw::c_uint;
48 #[cfg(feature = "minigbm")]
49 use std::os::raw::c_ulonglong;
50 use std::os::unix::net::UnixStream;
51 use std::path::Path;
52 use std::path::PathBuf;
53 use std::rc::Rc;
54 use std::result;
55 use std::time::Duration;
56
57 use anyhow::anyhow;
58 use anyhow::Context;
59 use base::error;
60 #[cfg(feature = "minigbm")]
61 use base::ioctl_iow_nr;
62 use base::ioctl_iowr_nr;
63 use base::ioctl_with_ref;
64 use base::linux::SharedMemoryLinux;
65 use base::pagesize;
66 use base::pipe;
67 use base::round_up_to_page_size;
68 use base::unix::FileFlags;
69 use base::warn;
70 use base::AsRawDescriptor;
71 use base::Error;
72 use base::Event;
73 use base::EventToken;
74 use base::EventType;
75 use base::FromRawDescriptor;
76 #[cfg(feature = "gpu")]
77 use base::IntoRawDescriptor;
78 #[cfg(feature = "minigbm")]
79 use base::MemoryMappingBuilder;
80 #[cfg(feature = "minigbm")]
81 use base::MmapError;
82 use base::Protection;
83 use base::RawDescriptor;
84 use base::Result;
85 use base::SafeDescriptor;
86 use base::ScmSocket;
87 use base::SharedMemory;
88 use base::Tube;
89 use base::TubeError;
90 use base::VolatileMemoryError;
91 use base::WaitContext;
92 use base::WorkerThread;
93 use data_model::Le32;
94 use data_model::Le64;
95 use hypervisor::MemCacheType;
96 #[cfg(feature = "minigbm")]
97 use libc::EBADF;
98 #[cfg(feature = "minigbm")]
99 use libc::EINVAL;
100 #[cfg(feature = "minigbm")]
101 use libc::ENOSYS;
102 use remain::sorted;
103 use resources::address_allocator::AddressAllocator;
104 use resources::AddressRange;
105 use resources::Alloc;
106 #[cfg(feature = "minigbm")]
107 use rutabaga_gfx::DrmFormat;
108 #[cfg(feature = "minigbm")]
109 use rutabaga_gfx::ImageAllocationInfo;
110 #[cfg(feature = "minigbm")]
111 use rutabaga_gfx::ImageMemoryRequirements;
112 #[cfg(feature = "minigbm")]
113 use rutabaga_gfx::RutabagaDescriptor;
114 #[cfg(feature = "minigbm")]
115 use rutabaga_gfx::RutabagaError;
116 #[cfg(feature = "minigbm")]
117 use rutabaga_gfx::RutabagaGralloc;
118 #[cfg(feature = "minigbm")]
119 use rutabaga_gfx::RutabagaGrallocBackendFlags;
120 #[cfg(feature = "minigbm")]
121 use rutabaga_gfx::RutabagaGrallocFlags;
122 #[cfg(feature = "minigbm")]
123 use rutabaga_gfx::RutabagaIntoRawDescriptor;
124 #[cfg(feature = "minigbm")]
125 use rutabaga_gfx::RUTABAGA_MAP_CACHE_CACHED;
126 #[cfg(feature = "minigbm")]
127 use rutabaga_gfx::RUTABAGA_MAP_CACHE_MASK;
128 use thiserror::Error as ThisError;
129 use vm_control::VmMemorySource;
130 use vm_memory::GuestAddress;
131 use vm_memory::GuestMemory;
132 use vm_memory::GuestMemoryError;
133 use zerocopy::AsBytes;
134 use zerocopy::FromBytes;
135 use zerocopy::FromZeroes;
136
137 #[cfg(feature = "gpu")]
138 use super::resource_bridge::get_resource_info;
139 #[cfg(feature = "gpu")]
140 use super::resource_bridge::BufferInfo;
141 #[cfg(feature = "gpu")]
142 use super::resource_bridge::ResourceBridgeError;
143 #[cfg(feature = "gpu")]
144 use super::resource_bridge::ResourceInfo;
145 #[cfg(feature = "gpu")]
146 use super::resource_bridge::ResourceRequest;
147 use super::DeviceType;
148 use super::Interrupt;
149 use super::Queue;
150 use super::Reader;
151 use super::SharedMemoryMapper;
152 use super::SharedMemoryRegion;
153 use super::VirtioDevice;
154 use super::Writer;
155 use crate::virtio::device_constants::wl::VIRTIO_WL_F_SEND_FENCES;
156 use crate::virtio::device_constants::wl::VIRTIO_WL_F_TRANS_FLAGS;
157 use crate::virtio::device_constants::wl::VIRTIO_WL_F_USE_SHMEM;
158
159 const QUEUE_SIZE: u16 = 256;
160 const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];
161
162 const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
163 const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
164 const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
165 const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
166 const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
167 const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
168 const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
169 const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
170 #[cfg(feature = "minigbm")]
171 const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
172 #[cfg(feature = "minigbm")]
173 const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
174 #[cfg(feature = "gpu")]
175 const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
176 const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
177 const VIRTIO_WL_RESP_OK: u32 = 4096;
178 const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
179 #[cfg(feature = "minigbm")]
180 const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
181 const VIRTIO_WL_RESP_ERR: u32 = 4352;
182 const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
183 const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
184 const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
185 const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
186 const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
187 const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
188 const VIRTIO_WL_VFD_READ: u32 = 0x2;
189 const VIRTIO_WL_VFD_MAP: u32 = 0x2;
190 const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
191 const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
192
193 const NEXT_VFD_ID_BASE: u32 = 0x40000000;
194 const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
195 // Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
196 // number of allocs.
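// With 4 KiB pages this works out to 4096 - 16 (CtrlVfdRecv) - 28 * 4 (vfd ids) = 3968 bytes of
// payload per buffer.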
197 const IN_BUFFER_LEN: usize =
198 0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
199
200 #[cfg(feature = "minigbm")]
201 const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;
202
203 #[cfg(feature = "minigbm")]
204 const DMA_BUF_IOCTL_BASE: c_uint = 0x62;
205 #[cfg(feature = "minigbm")]
206 const DMA_BUF_SYNC_WRITE: c_uint = 0x2;
207 #[cfg(feature = "minigbm")]
208 const DMA_BUF_SYNC_END: c_uint = 0x4;
209
210 #[cfg(feature = "minigbm")]
211 #[repr(C)]
212 #[derive(Copy, Clone)]
213 struct dma_buf_sync {
214 flags: c_ulonglong,
215 }
216
217 #[cfg(feature = "minigbm")]
218 ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);
219
220 #[repr(C)]
221 #[derive(Copy, Clone, Default)]
222 struct sync_file_info {
223 name: [u8; 32],
224 status: i32,
225 flags: u32,
226 num_fences: u32,
227 pad: u32,
228 sync_fence_info: u64,
229 }
230
231 ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);
232
fn is_fence(f: &File) -> bool {
234 let info = sync_file_info::default();
235 // SAFETY:
236 // Safe as f is a valid file
237 unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO(), &info) == 0 }
238 }
239
240 #[cfg(feature = "minigbm")]
241 #[derive(Debug, Default)]
242 struct GpuMemoryPlaneDesc {
243 stride: u32,
244 offset: u32,
245 }
246
247 #[cfg(feature = "minigbm")]
248 #[derive(Debug, Default)]
249 struct GpuMemoryDesc {
250 planes: [GpuMemoryPlaneDesc; 3],
251 }
252
253 const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
254 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
255 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
256 const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;
257
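// Addresses handed to the guest are expressed as page frame numbers, i.e. byte offsets shifted
// right by this amount (4 KiB pages); see `compute_pfn` below.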
258 const VIRTIO_WL_PFN_SHIFT: u32 = 12;
259
fn encode_vfd_new(
261 writer: &mut Writer,
262 resp: bool,
263 vfd_id: u32,
264 flags: u32,
265 pfn: u64,
266 size: u32,
267 ) -> WlResult<()> {
268 let ctrl_vfd_new = CtrlVfdNew {
269 hdr: CtrlHeader {
270 type_: Le32::from(if resp {
271 VIRTIO_WL_RESP_VFD_NEW
272 } else {
273 VIRTIO_WL_CMD_VFD_NEW
274 }),
275 flags: Le32::from(0),
276 },
277 id: Le32::from(vfd_id),
278 flags: Le32::from(flags),
279 pfn: Le64::from(pfn),
280 size: Le32::from(size),
281 padding: Default::default(),
282 };
283
284 writer
285 .write_obj(ctrl_vfd_new)
286 .map_err(WlError::WriteResponse)
287 }
288
289 #[cfg(feature = "minigbm")]
fn encode_vfd_new_dmabuf(
291 writer: &mut Writer,
292 vfd_id: u32,
293 flags: u32,
294 pfn: u64,
295 size: u32,
296 desc: GpuMemoryDesc,
297 ) -> WlResult<()> {
298 let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
299 hdr: CtrlHeader {
300 type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
301 flags: Le32::from(0),
302 },
303 id: Le32::from(vfd_id),
304 flags: Le32::from(flags),
305 pfn: Le64::from(pfn),
306 size: Le32::from(size),
307 width: Le32::from(0),
308 height: Le32::from(0),
309 format: Le32::from(0),
310 stride0: Le32::from(desc.planes[0].stride),
311 stride1: Le32::from(desc.planes[1].stride),
312 stride2: Le32::from(desc.planes[2].stride),
313 offset0: Le32::from(desc.planes[0].offset),
314 offset1: Le32::from(desc.planes[1].offset),
315 offset2: Le32::from(desc.planes[2].offset),
316 };
317
318 writer
319 .write_obj(ctrl_vfd_new_dmabuf)
320 .map_err(WlError::WriteResponse)
321 }
322
fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
324 let ctrl_vfd_recv = CtrlVfdRecv {
325 hdr: CtrlHeader {
326 type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
327 flags: Le32::from(0),
328 },
329 id: Le32::from(vfd_id),
330 vfd_count: Le32::from(vfd_ids.len() as u32),
331 };
332 writer
333 .write_obj(ctrl_vfd_recv)
334 .map_err(WlError::WriteResponse)?;
335
336 for &recv_vfd_id in vfd_ids.iter() {
337 writer
338 .write_obj(Le32::from(recv_vfd_id))
339 .map_err(WlError::WriteResponse)?;
340 }
341
342 writer.write_all(data).map_err(WlError::WriteResponse)
343 }
344
fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
346 let ctrl_vfd_new = CtrlVfd {
347 hdr: CtrlHeader {
348 type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
349 flags: Le32::from(0),
350 },
351 id: Le32::from(vfd_id),
352 };
353
354 writer
355 .write_obj(ctrl_vfd_new)
356 .map_err(WlError::WriteResponse)
357 }
358
fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
360 match resp {
361 WlResp::VfdNew {
362 id,
363 flags,
364 pfn,
365 size,
366 resp,
367 } => encode_vfd_new(writer, resp, id, flags, pfn, size),
368 #[cfg(feature = "minigbm")]
369 WlResp::VfdNewDmabuf {
370 id,
371 flags,
372 pfn,
373 size,
374 desc,
375 } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
376 WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
377 WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
378 r => writer
379 .write_obj(Le32::from(r.get_code()))
380 .map_err(WlError::WriteResponse),
381 }
382 }
383
384 #[allow(dead_code)]
385 #[sorted]
386 #[derive(ThisError, Debug)]
387 enum WlError {
388 #[error("overflow in calculation")]
389 CheckedOffset,
390 #[error("failed to synchronize DMABuf access: {0}")]
391 DmabufSync(io::Error),
392 #[error("failed to create shared memory from descriptor: {0}")]
393 FromSharedMemory(Error),
394 #[error("failed to get seals: {0}")]
395 GetSeals(Error),
396 #[error("gralloc error: {0}")]
397 #[cfg(feature = "minigbm")]
398 GrallocError(#[from] RutabagaError),
399 #[error("access violation in guest memory: {0}")]
400 GuestMemory(#[from] GuestMemoryError),
401 #[error("invalid string: {0}")]
402 InvalidString(std::str::Utf8Error),
403 #[error("failed to create shared memory allocation: {0}")]
404 NewAlloc(Error),
405 #[error("failed to create pipe: {0}")]
406 NewPipe(Error),
407 #[error("error parsing descriptor: {0}")]
408 ParseDesc(io::Error),
409 #[error("failed to read a pipe: {0}")]
410 ReadPipe(io::Error),
411 #[error("failed to recv on a socket: {0}")]
412 RecvVfd(io::Error),
413 #[error("failed to send on a socket: {0}")]
414 SendVfd(io::Error),
415 #[error("shmem mapper failure: {0}")]
416 ShmemMapperError(anyhow::Error),
417 #[error("failed to connect socket: {0}")]
418 SocketConnect(io::Error),
419 #[error("failed to set socket as non-blocking: {0}")]
420 SocketNonBlock(io::Error),
421 #[error("unknown socket name: {0}")]
422 UnknownSocketName(String),
423 #[error("invalid response from parent VM")]
424 VmBadResponse,
425 #[error("failed to control parent VM: {0}")]
426 VmControl(TubeError),
    #[error("access violation in guest volatile memory: {0}")]
428 VolatileMemory(#[from] VolatileMemoryError),
429 #[error("failed to listen to descriptor on wait context: {0}")]
430 WaitContextAdd(Error),
431 #[error("failed to write to a pipe: {0}")]
432 WritePipe(io::Error),
433 #[error("failed to write response: {0}")]
434 WriteResponse(io::Error),
435 }
436
437 type WlResult<T> = result::Result<T, WlError>;
438
439 pub const WL_SHMEM_ID: u8 = 0;
440 pub const WL_SHMEM_SIZE: u64 = 1 << 32;
441
442 struct VmRequesterState {
443 mapper: Box<dyn SharedMemoryMapper>,
444 #[cfg(feature = "minigbm")]
445 gralloc: RutabagaGralloc,
446
447 // Allocator for shm address space
448 address_allocator: AddressAllocator,
449
450 // Map of existing mappings in the shm address space
451 allocs: BTreeMap<u64 /* offset */, Alloc>,
452
453 // The id for the next shmem allocation
454 next_alloc: usize,
455 }
456
457 #[derive(Clone)]
458 struct VmRequester {
459 state: Rc<RefCell<VmRequesterState>>,
460 }
461
462 // The following are wrappers to avoid base dependencies in the rutabaga crate
463 #[cfg(feature = "minigbm")]
fn to_safe_descriptor(r: RutabagaDescriptor) -> SafeDescriptor {
465 // SAFETY:
466 // Safe because we own the SafeDescriptor at this point.
467 unsafe { SafeDescriptor::from_raw_descriptor(r.into_raw_descriptor()) }
468 }
469
470 impl VmRequester {
    fn new(
472 mapper: Box<dyn SharedMemoryMapper>,
473 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
474 ) -> VmRequester {
475 VmRequester {
476 state: Rc::new(RefCell::new(VmRequesterState {
477 mapper,
478 #[cfg(feature = "minigbm")]
479 gralloc,
480 address_allocator: AddressAllocator::new(
481 AddressRange::from_start_and_size(0, WL_SHMEM_SIZE).unwrap(),
482 Some(pagesize() as u64),
483 None,
484 )
485 .expect("failed to create allocator"),
486 allocs: BTreeMap::new(),
487 next_alloc: 0,
488 })),
489 }
490 }
491
    fn unregister_memory(&self, offset: u64) -> WlResult<()> {
493 let mut state = self.state.borrow_mut();
494 state
495 .mapper
496 .remove_mapping(offset)
497 .map_err(WlError::ShmemMapperError)?;
498 let alloc = state
499 .allocs
500 .remove(&offset)
501 .context("unknown offset")
502 .map_err(WlError::ShmemMapperError)?;
503 state
504 .address_allocator
505 .release(alloc)
506 .expect("corrupt address space");
507 Ok(())
508 }
509
510 #[cfg(feature = "minigbm")]
    fn allocate_and_register_gpu_memory(
512 &self,
513 width: u32,
514 height: u32,
515 format: u32,
516 ) -> WlResult<(u64, SafeDescriptor, ImageMemoryRequirements)> {
517 let mut state = self.state.borrow_mut();
518
519 let img = ImageAllocationInfo {
520 width,
521 height,
522 drm_format: DrmFormat::from(format),
            // Linear layout is a requirement as the virtio wayland guest expects this for CPU
            // access to the buffer. Scanout and texturing are optional as the consumer (wayland
            // compositor) is expected to fall back to less efficient mechanisms for presentation
            // if necessary. In practice, linear buffers for commonly used formats will also
            // support scanout and texturing.
529 flags: RutabagaGrallocFlags::empty().use_linear(true),
530 };
531
532 let reqs = state
533 .gralloc
534 .get_image_memory_requirements(img)
535 .map_err(WlError::GrallocError)?;
536 let handle = state
537 .gralloc
538 .allocate_memory(reqs)
539 .map_err(WlError::GrallocError)?;
540 drop(state);
541
542 let safe_descriptor = to_safe_descriptor(handle.os_handle);
543 self.register_memory(
544 safe_descriptor
545 .try_clone()
546 .context("failed to dup gfx handle")
547 .map_err(WlError::ShmemMapperError)?,
548 reqs.size,
549 Protection::read_write(),
550 )
551 .map(|info| (info, safe_descriptor, reqs))
552 }
553
    fn register_shmem(&self, shm: &SharedMemory) -> WlResult<u64> {
555 let prot = match FileFlags::from_file(shm) {
556 Ok(FileFlags::Read) => Protection::read(),
557 Ok(FileFlags::Write) => Protection::write(),
558 Ok(FileFlags::ReadWrite) => {
559 let seals = shm.get_seals().map_err(WlError::GetSeals)?;
560 if seals.write_seal() {
561 Protection::read()
562 } else {
563 Protection::read_write()
564 }
565 }
566 Err(e) => {
567 return Err(WlError::ShmemMapperError(anyhow!(
568 "failed to get file descriptor flags with error: {:?}",
569 e
570 )))
571 }
572 };
573 self.register_memory(
574 SafeDescriptor::try_from(shm as &dyn AsRawDescriptor)
575 .context("failed to create safe descriptor")
576 .map_err(WlError::ShmemMapperError)?,
577 shm.size(),
578 prot,
579 )
580 }
581
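    // Wraps `descriptor` in a `VmMemorySource`, carves an offset out of the device's shared
    // memory region with the address allocator, and asks the mapper to install the mapping. The
    // offset is released again if the mapping fails.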
    fn register_memory(
583 &self,
584 descriptor: SafeDescriptor,
585 size: u64,
586 prot: Protection,
587 ) -> WlResult<u64> {
588 let mut state = self.state.borrow_mut();
589 let size = round_up_to_page_size(size as usize) as u64;
590
591 let source = VmMemorySource::Descriptor {
592 descriptor,
593 offset: 0,
594 size,
595 };
596 let alloc = Alloc::Anon(state.next_alloc);
597 state.next_alloc += 1;
598 let offset = state
599 .address_allocator
600 .allocate(size, alloc, "virtio-wl".to_owned())
601 .context("failed to allocate offset")
602 .map_err(WlError::ShmemMapperError)?;
603
604 match state
605 .mapper
606 .add_mapping(source, offset, prot, MemCacheType::CacheCoherent)
607 {
608 Ok(()) => {
609 state.allocs.insert(offset, alloc);
610 Ok(offset)
611 }
612 Err(e) => {
613 // We just allocated it ourselves, it must exist.
614 state
615 .address_allocator
616 .release(alloc)
617 .expect("corrupt address space");
618 Err(WlError::ShmemMapperError(e))
619 }
620 }
621 }
622 }
623
624 #[repr(C)]
625 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
626 struct CtrlHeader {
627 type_: Le32,
628 flags: Le32,
629 }
630
631 #[repr(C)]
632 #[derive(Copy, Clone, Default, FromZeroes, FromBytes, AsBytes)]
633 struct CtrlVfdNew {
634 hdr: CtrlHeader,
635 id: Le32,
636 flags: Le32,
637 pfn: Le64,
638 size: Le32,
639 padding: Le32,
640 }
641
642 #[repr(C)]
643 #[derive(Copy, Clone, Default, FromZeroes, FromBytes)]
644 struct CtrlVfdNewCtxNamed {
645 hdr: CtrlHeader,
646 id: Le32,
647 flags: Le32, // Ignored.
648 pfn: Le64, // Ignored.
649 size: Le32, // Ignored.
650 name: [u8; 32],
651 }
652
653 #[repr(C)]
654 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
655 #[cfg(feature = "minigbm")]
656 struct CtrlVfdNewDmabuf {
657 hdr: CtrlHeader,
658 id: Le32,
659 flags: Le32,
660 pfn: Le64,
661 size: Le32,
662 width: Le32,
663 height: Le32,
664 format: Le32,
665 stride0: Le32,
666 stride1: Le32,
667 stride2: Le32,
668 offset0: Le32,
669 offset1: Le32,
670 offset2: Le32,
671 }
672
673 #[cfg(feature = "minigbm")]
674 #[repr(C)]
675 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
677 struct CtrlVfdDmabufSync {
678 hdr: CtrlHeader,
679 id: Le32,
680 flags: Le32,
681 }
682
683 #[repr(C)]
684 #[derive(Copy, Clone, AsBytes, FromZeroes, FromBytes)]
685 struct CtrlVfdRecv {
686 hdr: CtrlHeader,
687 id: Le32,
688 vfd_count: Le32,
689 }
690
691 #[repr(C)]
692 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
693 struct CtrlVfd {
694 hdr: CtrlHeader,
695 id: Le32,
696 }
697
698 #[repr(C)]
699 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
700 struct CtrlVfdSend {
701 hdr: CtrlHeader,
702 id: Le32,
703 vfd_count: Le32,
704 // Remainder is an array of vfd_count IDs followed by data.
705 }
706
707 #[repr(C)]
708 #[derive(Copy, Clone, Default, AsBytes, FromZeroes, FromBytes)]
709 struct CtrlVfdSendVfd {
710 kind: Le32,
711 id: Le32,
712 }
713
714 #[repr(C)]
715 #[derive(Copy, Clone, FromZeroes, FromBytes)]
716 union CtrlVfdSendVfdV2Payload {
717 id: Le32,
718 seqno: Le64,
719 }
720
721 #[repr(C)]
722 #[derive(Copy, Clone, FromZeroes, FromBytes)]
723 struct CtrlVfdSendVfdV2 {
724 kind: Le32,
725 payload: CtrlVfdSendVfdV2Payload,
726 }
727
728 impl CtrlVfdSendVfdV2 {
    fn id(&self) -> Le32 {
730 assert!(
731 self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
732 || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
733 );
734 // SAFETY: trivially safe given we assert kind
735 unsafe { self.payload.id }
736 }
737 #[cfg(feature = "gpu")]
    fn seqno(&self) -> Le64 {
739 assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
740 // SAFETY: trivially safe given we assert kind
741 unsafe { self.payload.seqno }
742 }
743 }
744
745 #[derive(Debug)]
746 #[allow(dead_code)]
747 enum WlResp<'a> {
748 Ok,
749 VfdNew {
750 id: u32,
751 flags: u32,
752 pfn: u64,
753 size: u32,
754 // The VfdNew variant can be either a response or a command depending on this `resp`. This
755 // is important for the `get_code` method.
756 resp: bool,
757 },
758 #[cfg(feature = "minigbm")]
759 VfdNewDmabuf {
760 id: u32,
761 flags: u32,
762 pfn: u64,
763 size: u32,
764 desc: GpuMemoryDesc,
765 },
766 VfdRecv {
767 id: u32,
768 data: &'a [u8],
769 vfds: &'a [u32],
770 },
771 VfdHup {
772 id: u32,
773 },
774 Err(Box<dyn StdError>),
775 OutOfMemory,
776 InvalidId,
777 InvalidType,
778 InvalidFlags,
779 InvalidCommand,
780 }
781
782 impl<'a> WlResp<'a> {
    fn get_code(&self) -> u32 {
784 match *self {
785 WlResp::Ok => VIRTIO_WL_RESP_OK,
786 WlResp::VfdNew { resp, .. } => {
787 if resp {
788 VIRTIO_WL_RESP_VFD_NEW
789 } else {
790 VIRTIO_WL_CMD_VFD_NEW
791 }
792 }
793 #[cfg(feature = "minigbm")]
794 WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
795 WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
796 WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
797 WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
798 WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
799 WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
800 WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
801 WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
802 WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
803 }
804 }
805 }
806
807 #[derive(Default)]
808 struct WlVfd {
809 socket: Option<ScmSocket<UnixStream>>,
810 guest_shared_memory: Option<SharedMemory>,
811 remote_pipe: Option<File>,
812 local_pipe: Option<(u32 /* flags */, File)>,
813 slot: Option<(u64 /* offset */, VmRequester)>,
814 #[cfg(feature = "minigbm")]
815 is_dmabuf: bool,
816 #[cfg(feature = "minigbm")]
817 map_info: u32,
818 fence: Option<File>,
819 is_fence: bool,
820 }
821
822 impl fmt::Debug for WlVfd {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
824 write!(f, "WlVfd {{")?;
825 if let Some(s) = &self.socket {
826 write!(f, " socket: {}", s.as_raw_descriptor())?;
827 }
828 if let Some((offset, _)) = &self.slot {
829 write!(f, " offset: {}", offset)?;
830 }
831 if let Some(s) = &self.remote_pipe {
832 write!(f, " remote: {}", s.as_raw_descriptor())?;
833 }
834 if let Some((_, s)) = &self.local_pipe {
835 write!(f, " local: {}", s.as_raw_descriptor())?;
836 }
837 write!(f, " }}")
838 }
839 }
840
841 #[cfg(feature = "minigbm")]
fn flush_shared_memory(shared_memory: &SharedMemory) -> Result<()> {
843 let mmap = match MemoryMappingBuilder::new(shared_memory.size as usize)
844 .from_shared_memory(shared_memory)
845 .build()
846 {
847 Ok(v) => v,
848 Err(_) => return Err(Error::new(EINVAL)),
849 };
850 if let Err(err) = mmap.flush_all() {
851 base::error!("failed to flush shared memory: {}", err);
852 return match err {
853 MmapError::NotImplemented(_) => Err(Error::new(ENOSYS)),
854 _ => Err(Error::new(EINVAL)),
855 };
856 }
857 Ok(())
858 }
859
860 impl WlVfd {
    fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
862 let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
863 let mut vfd = WlVfd::default();
864 vfd.socket = Some(socket.try_into().map_err(WlError::SocketConnect)?);
865 Ok(vfd)
866 }
867
    fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
869 let size_page_aligned = round_up_to_page_size(size as usize) as u64;
870 let vfd_shm =
871 SharedMemory::new("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;
872
873 let offset = vm.register_shmem(&vfd_shm)?;
874
875 let mut vfd = WlVfd::default();
876 vfd.guest_shared_memory = Some(vfd_shm);
877 vfd.slot = Some((offset, vm));
878 Ok(vfd)
879 }
880
881 #[cfg(feature = "minigbm")]
    fn dmabuf(
883 vm: VmRequester,
884 width: u32,
885 height: u32,
886 format: u32,
887 ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
888 let (offset, desc, reqs) = vm.allocate_and_register_gpu_memory(width, height, format)?;
889 let mut vfd = WlVfd::default();
890 let vfd_shm =
891 SharedMemory::from_safe_descriptor(desc, reqs.size).map_err(WlError::NewAlloc)?;
892
893 let mut desc = GpuMemoryDesc::default();
894 for i in 0..3 {
895 desc.planes[i] = GpuMemoryPlaneDesc {
896 stride: reqs.strides[i],
897 offset: reqs.offsets[i],
898 }
899 }
900
901 vfd.guest_shared_memory = Some(vfd_shm);
902 vfd.slot = Some((offset, vm));
903 vfd.is_dmabuf = true;
904 vfd.map_info = reqs.map_info;
905 Ok((vfd, desc))
906 }
907
908 #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
910 if !self.is_dmabuf {
911 return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
912 }
913
914 match &self.guest_shared_memory {
915 Some(descriptor) => {
916 let sync = dma_buf_sync {
917 flags: flags as u64,
918 };
919 // SAFETY:
920 // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
921 if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
922 return Err(WlError::DmabufSync(io::Error::last_os_error()));
923 }
924
925 // virtio-wl kernel driver always maps dmabufs with WB memory type, regardless of
926 // the host memory type (which is wrong). However, to avoid changing the protocol,
927 // assume that all guest writes are cached and ensure clflush-like ops on all mapped
928 // cachelines if the host mapping is not cached.
929 const END_WRITE_MASK: u32 = DMA_BUF_SYNC_WRITE | DMA_BUF_SYNC_END;
930 if (flags & END_WRITE_MASK) == END_WRITE_MASK
931 && (self.map_info & RUTABAGA_MAP_CACHE_MASK) != RUTABAGA_MAP_CACHE_CACHED
932 {
933 if let Err(err) = flush_shared_memory(descriptor) {
934 base::warn!("failed to flush cached dmabuf mapping: {:?}", err);
935 return Err(WlError::DmabufSync(io::Error::from_raw_os_error(
936 err.errno(),
937 )));
938 }
939 }
940 Ok(())
941 }
942 None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
943 }
944 }
945
    fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
947 let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
948 let mut vfd = WlVfd::default();
949 vfd.remote_pipe = Some(read_pipe);
950 vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
951 Ok(vfd)
952 }
953
    fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
955 let (read_pipe, write_pipe) = pipe().map_err(WlError::NewPipe)?;
956 let mut vfd = WlVfd::default();
957 vfd.remote_pipe = Some(write_pipe);
958 vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
959 Ok(vfd)
960 }
961
    fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
963 // We need to determine if the given file is more like shared memory or a pipe/socket. A
964 // quick and easy check is to seek to the end of the file. If it works we assume it's not a
965 // pipe/socket because those have no end. We can even use that seek location as an indicator
966 // for how big the shared memory chunk to map into guest memory is. If seeking to the end
967 // fails, we assume it's a socket or pipe with read/write semantics.
968 if descriptor.seek(SeekFrom::End(0)).is_ok() {
969 let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
970 let offset = vm.register_shmem(&shm)?;
971
972 let mut vfd = WlVfd::default();
973 vfd.guest_shared_memory = Some(shm);
974 vfd.slot = Some((offset, vm));
975 Ok(vfd)
976 } else if is_fence(&descriptor) {
977 let mut vfd = WlVfd::default();
978 vfd.is_fence = true;
979 vfd.fence = Some(descriptor);
980 Ok(vfd)
981 } else {
982 let flags = match FileFlags::from_file(&descriptor) {
983 Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
984 Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
985 Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
986 _ => 0,
987 };
988 let mut vfd = WlVfd::default();
989 vfd.local_pipe = Some((flags, descriptor));
990 Ok(vfd)
991 }
992 }
993
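    // Computes the virtio-wl flag bits that describe this VFD to the guest; the encoding differs
    // depending on whether the transition flags feature was negotiated.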
    fn flags(&self, use_transition_flags: bool) -> u32 {
995 let mut flags = 0;
996 if use_transition_flags {
997 if self.socket.is_some() {
998 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
999 }
1000 if let Some((f, _)) = self.local_pipe {
1001 flags |= f;
1002 }
1003 if self.is_fence {
1004 flags |= VIRTIO_WL_VFD_FENCE;
1005 }
1006 } else {
1007 if self.socket.is_some() {
1008 flags |= VIRTIO_WL_VFD_CONTROL;
1009 }
1010 if self.slot.is_some() {
1011 flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
1012 }
1013 }
1014 flags
1015 }
1016
1017 // Offset within the shared memory region this VFD was mapped at.
    fn offset(&self) -> Option<u64> {
1019 self.slot.as_ref().map(|s| s.0)
1020 }
1021
1022 // Size in bytes of the shared memory VFD.
    fn size(&self) -> Option<u64> {
1024 self.guest_shared_memory.as_ref().map(|shm| shm.size())
1025 }
1026
1027 // The descriptor that gets sent if this VFD is sent over a socket.
    fn send_descriptor(&self) -> Option<RawDescriptor> {
1029 self.guest_shared_memory
1030 .as_ref()
1031 .map(|shm| shm.as_raw_descriptor())
1032 .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
1033 .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
1034 .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
1035 }
1036
1037 // The FD that is used for polling for events on this VFD.
    fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
1039 self.socket
1040 .as_ref()
1041 .map(|s| s as &dyn AsRawDescriptor)
1042 .or_else(|| {
1043 self.local_pipe
1044 .as_ref()
1045 .map(|(_, p)| p as &dyn AsRawDescriptor)
1046 })
1047 .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
1048 }
1049
1050 // Sends data/files from the guest to the host over this VFD.
    fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
1052 if let Some(socket) = &self.socket {
1053 socket
1054 .send_vectored_with_fds(&data.get_remaining(), rds)
1055 .map_err(WlError::SendVfd)?;
1056 // All remaining data in `data` is now considered consumed.
1057 data.consume(::std::usize::MAX);
1058 Ok(WlResp::Ok)
1059 } else if let Some((_, local_pipe)) = &mut self.local_pipe {
1060 // Impossible to send descriptors over a simple pipe.
1061 if !rds.is_empty() {
1062 return Ok(WlResp::InvalidType);
1063 }
1064 data.read_to(local_pipe, usize::max_value())
1065 .map_err(WlError::WritePipe)?;
1066 Ok(WlResp::Ok)
1067 } else {
1068 Ok(WlResp::InvalidType)
1069 }
1070 }
1071
1072 // Receives data/files from the host for this VFD and queues it for the guest.
    fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
1074 if let Some(socket) = self.socket.take() {
1075 let mut buf = vec![0; IN_BUFFER_LEN];
1076 // If any errors happen, the socket will get dropped, preventing more reading.
1077 let (len, descriptors) = socket
1078 .recv_with_fds(&mut buf, VIRTWL_SEND_MAX_ALLOCS)
1079 .map_err(WlError::RecvVfd)?;
            // If any data was read, put the socket back for future recv operations.
1081 if len != 0 || !descriptors.is_empty() {
1082 buf.truncate(len);
1083 buf.shrink_to_fit();
1084 self.socket = Some(socket);
1085 in_file_queue.extend(descriptors.into_iter().map(File::from));
1086 return Ok(buf);
1087 }
1088 Ok(Vec::new())
1089 } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
1090 let mut buf = vec![0; IN_BUFFER_LEN];
1091 let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
1092 if len != 0 {
1093 buf.truncate(len);
1094 buf.shrink_to_fit();
1095 self.local_pipe = Some((flags, local_pipe));
1096 return Ok(buf);
1097 }
1098 Ok(Vec::new())
1099 } else {
1100 Ok(Vec::new())
1101 }
1102 }
1103
1104 // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
1105 // up events.
    fn close_remote(&mut self) {
1107 self.remote_pipe = None;
1108 }
1109
    fn close(&mut self) -> WlResult<()> {
1111 if let Some((offset, vm)) = self.slot.take() {
1112 vm.unregister_memory(offset)?;
1113 }
1114 self.socket = None;
1115 self.remote_pipe = None;
1116 self.local_pipe = None;
1117 Ok(())
1118 }
1119 }
1120
1121 impl Drop for WlVfd {
    fn drop(&mut self) {
1123 let _ = self.close();
1124 }
1125 }
1126
1127 #[derive(Debug)]
1128 enum WlRecv {
1129 Vfd { id: u32 },
1130 Data { buf: Vec<u8> },
1131 Hup,
1132 }
1133
1134 pub struct WlState {
1135 wayland_paths: BTreeMap<String, PathBuf>,
1136 vm: VmRequester,
1137 resource_bridge: Option<Tube>,
1138 use_transition_flags: bool,
1139 wait_ctx: WaitContext<u32>,
1140 vfds: BTreeMap<u32, WlVfd>,
1141 next_vfd_id: u32,
1142 in_file_queue: Vec<File>,
1143 in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
1144 current_recv_vfd: Option<u32>,
1145 recv_vfds: Vec<u32>,
1146 #[cfg(feature = "gpu")]
1147 signaled_fence: Option<SafeDescriptor>,
1148 use_send_vfd_v2: bool,
1149 address_offset: Option<u64>,
1150 }
1151
1152 impl WlState {
1153 /// Create a new `WlState` instance for running a virtio-wl device.
    pub fn new(
1155 wayland_paths: BTreeMap<String, PathBuf>,
1156 mapper: Box<dyn SharedMemoryMapper>,
1157 use_transition_flags: bool,
1158 use_send_vfd_v2: bool,
1159 resource_bridge: Option<Tube>,
1160 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1161 address_offset: Option<u64>,
1162 ) -> WlState {
1163 WlState {
1164 wayland_paths,
1165 vm: VmRequester::new(
1166 mapper,
1167 #[cfg(feature = "minigbm")]
1168 gralloc,
1169 ),
1170 resource_bridge,
1171 wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
1172 use_transition_flags,
1173 vfds: BTreeMap::new(),
1174 next_vfd_id: NEXT_VFD_ID_BASE,
1175 in_file_queue: Vec::new(),
1176 in_queue: VecDeque::new(),
1177 current_recv_vfd: None,
1178 recv_vfds: Vec::new(),
1179 #[cfg(feature = "gpu")]
1180 signaled_fence: None,
1181 use_send_vfd_v2,
1182 address_offset,
1183 }
1184 }
1185
1186 /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
1187 /// long-term solution is to replace the WaitContext completely by spawning async workers
1188 /// instead.
    pub fn wait_ctx(&self) -> &WaitContext<u32> {
1190 &self.wait_ctx
1191 }
1192
    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
1194 if id & VFD_ID_HOST_MASK != 0 {
1195 return Ok(WlResp::InvalidId);
1196 }
1197
1198 if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
1199 return Ok(WlResp::InvalidFlags);
1200 }
1201
1202 if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
1203 return Ok(WlResp::InvalidFlags);
1204 }
1205
1206 match self.vfds.entry(id) {
1207 Entry::Vacant(entry) => {
1208 let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
1209 WlVfd::pipe_remote_read_local_write()?
1210 } else if flags & VIRTIO_WL_VFD_READ != 0 {
1211 WlVfd::pipe_remote_write_local_read()?
1212 } else {
1213 return Ok(WlResp::InvalidFlags);
1214 };
1215 self.wait_ctx
1216 .add(vfd.wait_descriptor().unwrap(), id)
1217 .map_err(WlError::WaitContextAdd)?;
1218 let resp = WlResp::VfdNew {
1219 id,
1220 flags: 0,
1221 pfn: 0,
1222 size: 0,
1223 resp: true,
1224 };
1225 entry.insert(vfd);
1226 Ok(resp)
1227 }
1228 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1229 }
1230 }
1231
    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
1233 if id & VFD_ID_HOST_MASK != 0 {
1234 return Ok(WlResp::InvalidId);
1235 }
1236
1237 if self.use_transition_flags {
1238 if flags != 0 {
1239 return Ok(WlResp::InvalidFlags);
1240 }
1241 } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
1242 return Ok(WlResp::Err(Box::from("invalid flags")));
1243 }
1244
1245 if self.vfds.contains_key(&id) {
1246 return Ok(WlResp::InvalidId);
1247 }
1248 let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
1249 let resp = WlResp::VfdNew {
1250 id,
1251 flags,
1252 pfn: self.compute_pfn(&vfd.offset()),
1253 size: vfd.size().unwrap_or_default() as u32,
1254 resp: true,
1255 };
1256 self.vfds.insert(id, vfd);
1257 Ok(resp)
1258 }
1259
1260 #[cfg(feature = "minigbm")]
    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
1262 if id & VFD_ID_HOST_MASK != 0 {
1263 return Ok(WlResp::InvalidId);
1264 }
1265
1266 if self.vfds.contains_key(&id) {
1267 return Ok(WlResp::InvalidId);
1268 }
1269 let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
1270 let resp = WlResp::VfdNewDmabuf {
1271 id,
1272 flags: 0,
1273 pfn: self.compute_pfn(&vfd.offset()),
1274 size: vfd.size().unwrap_or_default() as u32,
1275 desc,
1276 };
1277 self.vfds.insert(id, vfd);
1278 Ok(resp)
1279 }
1280
1281 #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
1283 if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
1284 return Ok(WlResp::InvalidFlags);
1285 }
1286
1287 match self.vfds.get_mut(&vfd_id) {
1288 Some(vfd) => {
1289 vfd.dmabuf_sync(flags)?;
1290 Ok(WlResp::Ok)
1291 }
1292 None => Ok(WlResp::InvalidId),
1293 }
1294 }
1295
    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
1297 if id & VFD_ID_HOST_MASK != 0 {
1298 return Ok(WlResp::InvalidId);
1299 }
1300
1301 let flags = if self.use_transition_flags {
1302 VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
1303 } else {
1304 VIRTIO_WL_VFD_CONTROL
1305 };
1306
1307 match self.vfds.entry(id) {
1308 Entry::Vacant(entry) => {
1309 let vfd = entry.insert(WlVfd::connect(
1310 self.wayland_paths
1311 .get(name)
1312 .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
1313 )?);
1314 self.wait_ctx
1315 .add(vfd.wait_descriptor().unwrap(), id)
1316 .map_err(WlError::WaitContextAdd)?;
1317 Ok(WlResp::VfdNew {
1318 id,
1319 flags,
1320 pfn: 0,
1321 size: 0,
1322 resp: true,
1323 })
1324 }
1325 Entry::Occupied(_) => Ok(WlResp::InvalidId),
1326 }
1327 }
1328
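    // Polls the wait context without blocking and turns ready vfds into queued `WlRecv` entries:
    // readable vfds are drained via `recv` and hung-up vfds get a `Hup` entry.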
    fn process_wait_context(&mut self) {
1330 let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
1331 Ok(v) => v,
1332 Err(e) => {
                error!("failed waiting for vfd events: {}", e);
1334 return;
1335 }
1336 };
1337
1338 for event in events.iter().filter(|e| e.is_readable) {
1339 if let Err(e) = self.recv(event.token) {
1340 error!("failed to recv from vfd: {}", e)
1341 }
1342 }
1343
1344 for event in events.iter().filter(|e| e.is_hungup) {
1345 if !event.is_readable {
1346 let vfd_id = event.token;
1347 if let Some(descriptor) =
1348 self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
1349 {
1350 if let Err(e) = self.wait_ctx.delete(descriptor) {
1351 warn!("failed to remove hungup vfd from poll context: {}", e);
1352 }
1353 }
1354 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1355 }
1356 }
1357 }
1358
    fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
1360 let mut to_delete = BTreeSet::new();
1361 for (dest_vfd_id, q) in &self.in_queue {
1362 if *dest_vfd_id == vfd_id {
1363 if let WlRecv::Vfd { id } = q {
1364 to_delete.insert(*id);
1365 }
1366 }
1367 }
1368 for vfd_id in to_delete {
1369 // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
1370 let _ = self.close(vfd_id);
1371 }
1372 match self.vfds.remove(&vfd_id) {
1373 Some(mut vfd) => {
1374 self.in_queue.retain(|&(id, _)| id != vfd_id);
1375 vfd.close()?;
1376 Ok(WlResp::Ok)
1377 }
1378 None => Ok(WlResp::InvalidId),
1379 }
1380 }
1381
1382 #[cfg(feature = "gpu")]
    fn get_info(&mut self, request: ResourceRequest) -> Option<SafeDescriptor> {
1384 let sock = self.resource_bridge.as_ref().unwrap();
1385 match get_resource_info(sock, request) {
1386 Ok(ResourceInfo::Buffer(BufferInfo { handle, .. })) => Some(handle),
1387 Ok(ResourceInfo::Fence { handle }) => Some(handle),
1388 Err(ResourceBridgeError::InvalidResource(req)) => {
1389 warn!("attempt to send non-existent gpu resource {}", req);
1390 None
1391 }
1392 Err(e) => {
1393 error!("{}", e);
1394 // If there was an error with the resource bridge, it can no longer be
1395 // trusted to continue to function.
1396 self.resource_bridge = None;
1397 None
1398 }
1399 }
1400 }
1401
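    // Handles a SEND command from the guest: resolves the listed vfd ids (local vfds or, with the
    // gpu feature, virtgpu resources/fences) into raw descriptors, then forwards them along with
    // the remaining payload over the target vfd's socket.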
    fn send(
1403 &mut self,
1404 vfd_id: u32,
1405 vfd_count: usize,
1406 foreign_id: bool,
1407 reader: &mut Reader,
1408 ) -> WlResult<WlResp> {
1409 // First stage gathers and normalizes all id information from guest memory.
1410 let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
1411 kind: Le32::from(0),
1412 payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
1413 }; VIRTWL_SEND_MAX_ALLOCS];
1414 for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
1415 *vfd_id = if foreign_id {
1416 if self.use_send_vfd_v2 {
1417 reader.read_obj().map_err(WlError::ParseDesc)?
1418 } else {
1419 let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
1420 CtrlVfdSendVfdV2 {
1421 kind: vfd.kind,
1422 payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
1423 }
1424 }
1425 } else {
1426 CtrlVfdSendVfdV2 {
1427 kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
1428 payload: CtrlVfdSendVfdV2Payload {
1429 id: reader.read_obj().map_err(WlError::ParseDesc)?,
1430 },
1431 }
1432 };
1433 }
1434
1435 // Next stage collects corresponding file descriptors for each id.
1436 let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
1437 #[cfg(feature = "gpu")]
1438 let mut bridged_files = Vec::new();
1439 for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
1440 match send_vfd_id.kind.to_native() {
1441 VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
1442 match self.vfds.get(&send_vfd_id.id().to_native()) {
1443 Some(vfd) => match vfd.send_descriptor() {
1444 Some(vfd_fd) => *descriptor = vfd_fd,
1445 None => return Ok(WlResp::InvalidType),
1446 },
1447 None => {
1448 warn!(
                                "attempt to send non-existent vfd 0x{:08x}",
1450 send_vfd_id.id().to_native()
1451 );
1452 return Ok(WlResp::InvalidId);
1453 }
1454 }
1455 }
1456 #[cfg(feature = "gpu")]
1457 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
1458 match self.get_info(ResourceRequest::GetBuffer {
1459 id: send_vfd_id.id().to_native(),
1460 }) {
1461 Some(handle) => {
1462 *descriptor = handle.as_raw_descriptor();
1463 bridged_files.push(handle.into());
1464 }
1465 None => return Ok(WlResp::InvalidId),
1466 }
1467 }
1468 #[cfg(feature = "gpu")]
1469 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
1470 match self.get_info(ResourceRequest::GetFence {
1471 seqno: send_vfd_id.seqno().to_native(),
1472 }) {
1473 Some(handle) => {
1474 *descriptor = handle.as_raw_descriptor();
1475 bridged_files.push(handle.into());
1476 }
1477 None => return Ok(WlResp::InvalidId),
1478 }
1479 }
1480 #[cfg(feature = "gpu")]
1481 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
1482 if self.resource_bridge.is_some() =>
1483 {
1484 if self.signaled_fence.is_none() {
1485 // If the guest is sending a signaled fence, we know a fence
1486 // with seqno 0 must already be signaled.
1487 match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
1488 Some(handle) => self.signaled_fence = Some(handle),
1489 None => return Ok(WlResp::InvalidId),
1490 }
1491 }
1492 match self.signaled_fence.as_ref().unwrap().try_clone() {
1493 Ok(dup) => {
1494 *descriptor = dup.into_raw_descriptor();
1495 // SAFETY:
1496 // Safe because the fd comes from a valid SafeDescriptor.
1497 let file = unsafe { File::from_raw_descriptor(*descriptor) };
1498 bridged_files.push(file);
1499 }
1500 Err(_) => return Ok(WlResp::InvalidId),
1501 }
1502 }
1503 VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
1504 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
1505 | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
1506 let _ = self.resource_bridge.as_ref();
1507 warn!("attempt to send foreign resource kind but feature is disabled");
1508 }
1509 kind => {
1510 warn!("attempt to send unknown foreign resource kind: {}", kind);
1511 return Ok(WlResp::InvalidId);
1512 }
1513 }
1514 }
1515
1516 // Final stage sends file descriptors and data to the target vfd's socket.
1517 match self.vfds.get_mut(&vfd_id) {
1518 Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
1519 WlResp::Ok => {}
1520 _ => return Ok(WlResp::InvalidType),
1521 },
1522 None => return Ok(WlResp::InvalidId),
1523 }
1524 // The vfds with remote FDs need to be closed so that the local side can receive
1525 // hangup events.
1526 for &send_vfd_id in &send_vfd_ids[..vfd_count] {
1527 if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
1528 if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
1529 vfd.close_remote();
1530 }
1531 }
1532 }
1533 Ok(WlResp::Ok)
1534 }
1535
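    // Drains any data and file descriptors pending on the given vfd and queues them for the guest
    // as `WlRecv::Vfd`/`WlRecv::Data` entries, or as a `WlRecv::Hup` if the vfd has hung up.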
    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
1537 let buf = match self.vfds.get_mut(&vfd_id) {
1538 Some(vfd) => {
1539 if vfd.is_fence {
1540 if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
1541 warn!("failed to remove hungup vfd from poll context: {}", e);
1542 }
1543 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1544 return Ok(());
1545 } else {
1546 vfd.recv(&mut self.in_file_queue)?
1547 }
1548 }
1549 None => return Ok(()),
1550 };
1551
1552 if self.in_file_queue.is_empty() && buf.is_empty() {
1553 self.in_queue.push_back((vfd_id, WlRecv::Hup));
1554 return Ok(());
1555 }
1556 for file in self.in_file_queue.drain(..) {
1557 let vfd = WlVfd::from_file(self.vm.clone(), file)?;
1558 if let Some(wait_descriptor) = vfd.wait_descriptor() {
1559 self.wait_ctx
1560 .add(wait_descriptor, self.next_vfd_id)
1561 .map_err(WlError::WaitContextAdd)?;
1562 }
1563 // Only necessary if we somehow wrap the id counter. The try_insert
1564 // API would be nicer, but that's currently experimental.
1565 while self.vfds.contains_key(&self.next_vfd_id) {
1566 self.next_vfd_id += 1;
1567 }
1568 self.vfds.insert(self.next_vfd_id, vfd);
1569 self.in_queue.push_back((
1570 vfd_id,
1571 WlRecv::Vfd {
1572 id: self.next_vfd_id,
1573 },
1574 ));
1575 self.next_vfd_id += 1;
1576 }
1577 self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));
1578
1579 Ok(())
1580 }
1581
    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
1583 let type_: Le32 = reader.peek_obj::<Le32>().map_err(WlError::ParseDesc)?;
1584 match type_.into() {
1585 VIRTIO_WL_CMD_VFD_NEW => {
1586 let ctrl = reader
1587 .read_obj::<CtrlVfdNew>()
1588 .map_err(WlError::ParseDesc)?;
1589 self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
1590 }
1591 VIRTIO_WL_CMD_VFD_CLOSE => {
1592 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1593 self.close(ctrl.id.into())
1594 }
1595 VIRTIO_WL_CMD_VFD_SEND => {
1596 let ctrl = reader
1597 .read_obj::<CtrlVfdSend>()
1598 .map_err(WlError::ParseDesc)?;
1599 let foreign_id = false;
1600 self.send(
1601 ctrl.id.into(),
1602 ctrl.vfd_count.to_native() as usize,
1603 foreign_id,
1604 reader,
1605 )
1606 }
1607 #[cfg(feature = "gpu")]
1608 VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
1609 let ctrl = reader
1610 .read_obj::<CtrlVfdSend>()
1611 .map_err(WlError::ParseDesc)?;
1612 let foreign_id = true;
1613 self.send(
1614 ctrl.id.into(),
1615 ctrl.vfd_count.to_native() as usize,
1616 foreign_id,
1617 reader,
1618 )
1619 }
1620 VIRTIO_WL_CMD_VFD_NEW_CTX => {
1621 let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
1622 self.new_context(ctrl.id.into(), "")
1623 }
1624 VIRTIO_WL_CMD_VFD_NEW_PIPE => {
1625 let ctrl = reader
1626 .read_obj::<CtrlVfdNew>()
1627 .map_err(WlError::ParseDesc)?;
1628 self.new_pipe(ctrl.id.into(), ctrl.flags.into())
1629 }
1630 #[cfg(feature = "minigbm")]
1631 VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
1632 let ctrl = reader
1633 .read_obj::<CtrlVfdNewDmabuf>()
1634 .map_err(WlError::ParseDesc)?;
1635 self.new_dmabuf(
1636 ctrl.id.into(),
1637 ctrl.width.into(),
1638 ctrl.height.into(),
1639 ctrl.format.into(),
1640 )
1641 }
1642 #[cfg(feature = "minigbm")]
1643 VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
1644 let ctrl = reader
1645 .read_obj::<CtrlVfdDmabufSync>()
1646 .map_err(WlError::ParseDesc)?;
1647 self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
1648 }
1649 VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
1650 let ctrl = reader
1651 .read_obj::<CtrlVfdNewCtxNamed>()
1652 .map_err(WlError::ParseDesc)?;
1653 let name_len = ctrl
1654 .name
1655 .iter()
1656 .position(|x| x == &0)
1657 .unwrap_or(ctrl.name.len());
1658 let name =
1659 std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
1660 self.new_context(ctrl.id.into(), name)
1661 }
1662 op_type => {
1663 warn!("unexpected command {}", op_type);
1664 Ok(WlResp::InvalidCommand)
1665 }
1666 }
1667 }
1668
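    // Peeks at the front of the `in` queue and translates it into the next response to write to
    // the guest, without consuming it; `pop_recv` is called once the response has actually been
    // written.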
    fn next_recv(&self) -> Option<WlResp> {
1670 if let Some(q) = self.in_queue.front() {
1671 match *q {
1672 (vfd_id, WlRecv::Vfd { id }) => {
1673 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1674 match self.vfds.get(&id) {
1675 Some(vfd) => Some(WlResp::VfdNew {
1676 id,
1677 flags: vfd.flags(self.use_transition_flags),
1678 pfn: self.compute_pfn(&vfd.offset()),
1679 size: vfd.size().unwrap_or_default() as u32,
1680 resp: false,
1681 }),
1682 _ => Some(WlResp::VfdNew {
1683 id,
1684 flags: 0,
1685 pfn: 0,
1686 size: 0,
1687 resp: false,
1688 }),
1689 }
1690 } else {
1691 Some(WlResp::VfdRecv {
1692 id: self.current_recv_vfd.unwrap(),
1693 data: &[],
1694 vfds: &self.recv_vfds[..],
1695 })
1696 }
1697 }
1698 (vfd_id, WlRecv::Data { ref buf }) => {
1699 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1700 Some(WlResp::VfdRecv {
1701 id: vfd_id,
1702 data: &buf[..],
1703 vfds: &self.recv_vfds[..],
1704 })
1705 } else {
1706 Some(WlResp::VfdRecv {
1707 id: self.current_recv_vfd.unwrap(),
1708 data: &[],
1709 vfds: &self.recv_vfds[..],
1710 })
1711 }
1712 }
1713 (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
1714 }
1715 } else {
1716 None
1717 }
1718 }
1719
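    // Called after the response from `next_recv` has been written to the guest. Updates the
    // `recv_vfds`/`current_recv_vfd` grouping state and pops the front `in` queue entry once it
    // has been fully delivered.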
    fn pop_recv(&mut self) {
1721 if let Some(q) = self.in_queue.front() {
1722 match *q {
1723 (vfd_id, WlRecv::Vfd { id }) => {
1724 if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
1725 self.recv_vfds.push(id);
1726 self.current_recv_vfd = Some(vfd_id);
1727 } else {
1728 self.recv_vfds.clear();
1729 self.current_recv_vfd = None;
1730 return;
1731 }
1732 }
1733 (vfd_id, WlRecv::Data { .. }) => {
                    // `next_recv` only returned this entry's data if it belongs to the current
                    // group, so check that before the grouping state is cleared.
                    if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
                        self.recv_vfds.clear();
                        self.current_recv_vfd = None;
                        return;
                    }
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
1739 }
1740 (_, WlRecv::Hup) => {
1741 self.recv_vfds.clear();
1742 self.current_recv_vfd = None;
1743 }
1744 }
1745 }
1746 self.in_queue.pop_front();
1747 }
1748
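    // For example, a mapping at shared memory offset 0x3000 with an `address_offset` of
    // 0x1_0000_0000 would be reported to the guest as pfn (0x1_0000_3000 >> 12) = 0x100003.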
    fn compute_pfn(&self, offset: &Option<u64>) -> u64 {
1750 let addr = match (offset, self.address_offset) {
1751 (Some(o), Some(address_offset)) => o + address_offset,
1752 (Some(o), None) => *o,
1753 // without shmem, 0 is the special address for "no_pfn"
1754 (None, Some(_)) => 0,
1755 // with shmem, WL_SHMEM_SIZE is the special address for "no_pfn"
1756 (None, None) => WL_SHMEM_SIZE,
1757 };
1758 addr >> VIRTIO_WL_PFN_SHIFT
1759 }
1760 }
1761
1762 #[derive(ThisError, Debug, PartialEq, Eq)]
1763 #[error("no descriptors available in queue")]
1764 pub struct DescriptorsExhausted;
1765
1766 /// Handle incoming events and forward them to the VM over the input queue.
pub fn process_in_queue(
1768 interrupt: &Interrupt,
1769 in_queue: &mut Queue,
1770 state: &mut WlState,
1771 ) -> ::std::result::Result<(), DescriptorsExhausted> {
1772 state.process_wait_context();
1773
1774 let mut needs_interrupt = false;
1775 let mut exhausted_queue = false;
1776 loop {
1777 let mut desc = if let Some(d) = in_queue.peek() {
1778 d
1779 } else {
1780 exhausted_queue = true;
1781 break;
1782 };
1783
1784 let mut should_pop = false;
1785 if let Some(in_resp) = state.next_recv() {
1786 match encode_resp(&mut desc.writer, in_resp) {
1787 Ok(()) => {
1788 should_pop = true;
1789 }
1790 Err(e) => {
1791 error!("failed to encode response to descriptor chain: {}", e);
1792 }
1793 }
1794 let bytes_written = desc.writer.bytes_written() as u32;
1795 needs_interrupt = true;
1796 let desc = desc.pop();
1797 in_queue.add_used(desc, bytes_written);
1798 } else {
1799 break;
1800 }
1801 if should_pop {
1802 state.pop_recv();
1803 }
1804 }
1805
1806 if needs_interrupt {
1807 in_queue.trigger_interrupt(interrupt);
1808 }
1809
1810 if exhausted_queue {
1811 Err(DescriptorsExhausted)
1812 } else {
1813 Ok(())
1814 }
1815 }
1816
1817 /// Handle messages from the output queue and forward them to the display server, if necessary.
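/// Each descriptor carries one guest command, which is decoded and executed by `WlState::execute`;
/// the resulting `WlResp` (or an error response) is encoded back into the same descriptor chain.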
1818 pub fn process_out_queue(interrupt: &Interrupt, out_queue: &mut Queue, state: &mut WlState) {
1819 let mut needs_interrupt = false;
1820 while let Some(mut desc) = out_queue.pop() {
1821 let resp = match state.execute(&mut desc.reader) {
1822 Ok(r) => r,
1823 Err(e) => WlResp::Err(Box::new(e)),
1824 };
1825
1826 match encode_resp(&mut desc.writer, resp) {
1827 Ok(()) => {}
1828 Err(e) => {
1829 error!("failed to encode response to descriptor chain: {}", e);
1830 }
1831 }
1832
1833 let len = desc.writer.bytes_written() as u32;
1834 out_queue.add_used(desc, len);
1835 needs_interrupt = true;
1836 }
1837
1838 if needs_interrupt {
1839 out_queue.trigger_interrupt(interrupt);
1840 }
1841 }
1842
1843 struct Worker {
1844 interrupt: Interrupt,
1845 in_queue: Queue,
1846 out_queue: Queue,
1847 state: WlState,
1848 }
1849
1850 impl Worker {
1851 fn new(
1852 interrupt: Interrupt,
1853 in_queue: Queue,
1854 out_queue: Queue,
1855 wayland_paths: BTreeMap<String, PathBuf>,
1856 mapper: Box<dyn SharedMemoryMapper>,
1857 use_transition_flags: bool,
1858 use_send_vfd_v2: bool,
1859 resource_bridge: Option<Tube>,
1860 #[cfg(feature = "minigbm")] gralloc: RutabagaGralloc,
1861 address_offset: Option<u64>,
1862 ) -> Worker {
1863 Worker {
1864 interrupt,
1865 in_queue,
1866 out_queue,
1867 state: WlState::new(
1868 wayland_paths,
1869 mapper,
1870 use_transition_flags,
1871 use_send_vfd_v2,
1872 resource_bridge,
1873 #[cfg(feature = "minigbm")]
1874 gralloc,
1875 address_offset,
1876 ),
1877 }
1878 }
1879
1880 fn run(mut self, kill_evt: Event) -> anyhow::Result<Vec<Queue>> {
1881 #[derive(EventToken)]
1882 enum Token {
1883 InQueue,
1884 OutQueue,
1885 Kill,
1886 State,
1887 InterruptResample,
1888 }
1889
1890 let wait_ctx: WaitContext<Token> = WaitContext::build_with(&[
1891 (self.in_queue.event(), Token::InQueue),
1892 (self.out_queue.event(), Token::OutQueue),
1893 (&kill_evt, Token::Kill),
1894 (&self.state.wait_ctx, Token::State),
1895 ])
1896 .context("failed creating WaitContext")?;
1897
1898 if let Some(resample_evt) = self.interrupt.get_resample_evt() {
1899 wait_ctx
1900 .add(resample_evt, Token::InterruptResample)
1901 .context("failed adding resample event to WaitContext.")?;
1902 }
1903
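// `WlState`'s wait context is dropped from polling (EventType::None) whenever the `in` queue has
// no descriptors left for forwarding vfd data, and is re-armed when the guest kicks the `in`
// queue again, so the worker does not spin on events it cannot deliver.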
1904 let mut watching_state_ctx = true;
1905 'wait: loop {
1906 let events = match wait_ctx.wait() {
1907 Ok(v) => v,
1908 Err(e) => {
1909 error!("failed waiting for events: {}", e);
1910 break;
1911 }
1912 };
1913
1914 for event in &events {
1915 match event.token {
1916 Token::InQueue => {
1917 let _ = self.in_queue.event().wait();
1918 if !watching_state_ctx {
1919 if let Err(e) =
1920 wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
1921 {
1922 error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
1923 break;
1924 }
1925 watching_state_ctx = true;
1926 }
1927 }
1928 Token::OutQueue => {
1929 let _ = self.out_queue.event().wait();
1930 process_out_queue(&self.interrupt, &mut self.out_queue, &mut self.state);
1931 }
1932 Token::Kill => break 'wait,
1933 Token::State => {
1934 if let Err(DescriptorsExhausted) =
1935 process_in_queue(&self.interrupt, &mut self.in_queue, &mut self.state)
1936 {
1937 if let Err(e) =
1938 wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
1939 {
1940 error!(
1941 "Failed to stop watching wait_ctx descriptor for WlState: {}",
1942 e
1943 );
1944 break;
1945 }
1946 watching_state_ctx = false;
1947 }
1948 }
1949 Token::InterruptResample => {
1950 self.interrupt.interrupt_resample();
1951 }
1952 }
1953 }
1954 }
1955
1956 let in_queue = self.in_queue;
1957 let out_queue = self.out_queue;
1958
1959 Ok(vec![in_queue, out_queue])
1960 }
1961 }
1962
1963 pub struct Wl {
1964 worker_thread: Option<WorkerThread<anyhow::Result<Vec<Queue>>>>,
1965 wayland_paths: BTreeMap<String, PathBuf>,
1966 mapper: Option<Box<dyn SharedMemoryMapper>>,
1967 resource_bridge: Option<Tube>,
1968 base_features: u64,
1969 acked_features: u64,
1970 #[cfg(feature = "minigbm")]
1971 gralloc: Option<RutabagaGralloc>,
1972 address_offset: Option<u64>,
1973 }
1974
1975 impl Wl {
1976 pub fn new(
1977 base_features: u64,
1978 wayland_paths: BTreeMap<String, PathBuf>,
1979 resource_bridge: Option<Tube>,
1980 ) -> Result<Wl> {
1981 Ok(Wl {
1982 worker_thread: None,
1983 wayland_paths,
1984 mapper: None,
1985 resource_bridge,
1986 base_features,
1987 acked_features: 0,
1988 #[cfg(feature = "minigbm")]
1989 gralloc: None,
1990 address_offset: None,
1991 })
1992 }
1993 }
1994
1995 impl VirtioDevice for Wl {
1996 fn keep_rds(&self) -> Vec<RawDescriptor> {
1997 let mut keep_rds = Vec::new();
1998
1999 if let Some(mapper) = &self.mapper {
2000 if let Some(raw_descriptor) = mapper.as_raw_descriptor() {
2001 keep_rds.push(raw_descriptor);
2002 }
2003 }
2004 if let Some(resource_bridge) = &self.resource_bridge {
2005 keep_rds.push(resource_bridge.as_raw_descriptor());
2006 }
2007 keep_rds
2008 }
2009
2010 #[cfg(feature = "minigbm")]
2011 fn on_device_sandboxed(&mut self) {
2012 // Gralloc initialization can cause some GPU drivers to create their own threads
2013 // and that must be done after sandboxing.
2014 match RutabagaGralloc::new(RutabagaGrallocBackendFlags::new()) {
2015 Ok(g) => self.gralloc = Some(g),
2016 Err(e) => {
2017 error!("failed to initialize gralloc {:?}", e);
2018 }
2019 };
2020 }
2021
2022 fn device_type(&self) -> DeviceType {
2023 DeviceType::Wl
2024 }
2025
2026 fn queue_max_sizes(&self) -> &[u16] {
2027 QUEUE_SIZES
2028 }
2029
2030 fn features(&self) -> u64 {
2031 self.base_features
2032 | 1 << VIRTIO_WL_F_TRANS_FLAGS
2033 | 1 << VIRTIO_WL_F_SEND_FENCES
2034 | 1 << VIRTIO_WL_F_USE_SHMEM
2035 }
2036
2037 fn ack_features(&mut self, value: u64) {
2038 self.acked_features |= value;
2039 }
2040
2041 fn activate(
2042 &mut self,
2043 _mem: GuestMemory,
2044 interrupt: Interrupt,
2045 mut queues: BTreeMap<usize, Queue>,
2046 ) -> anyhow::Result<()> {
2047 if queues.len() != QUEUE_SIZES.len() {
2048 return Err(anyhow!(
2049 "expected {} queues, got {}",
2050 QUEUE_SIZES.len(),
2051 queues.len()
2052 ));
2053 }
2054
2055 let mapper = self.mapper.take().context("missing mapper")?;
2056
2057 let wayland_paths = self.wayland_paths.clone();
2058 let use_transition_flags = self.acked_features & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0;
2059 let use_send_vfd_v2 = self.acked_features & (1 << VIRTIO_WL_F_SEND_FENCES) != 0;
2060 let use_shmem = self.acked_features & (1 << VIRTIO_WL_F_USE_SHMEM) != 0;
2061 let resource_bridge = self.resource_bridge.take();
2062 #[cfg(feature = "minigbm")]
2063 let gralloc = self
2064 .gralloc
2065 .take()
2066 .expect("gralloc already passed to worker");
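// With VIRTIO_WL_F_USE_SHMEM negotiated, vfd offsets are already relative to the device's
// shared memory region, so no extra base is applied; otherwise fall back to the legacy fixed
// address base provided via set_shared_memory_region_base().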
2067 let address_offset = if !use_shmem {
2068 self.address_offset
2069 } else {
2070 None
2071 };
2072
2073 self.worker_thread = Some(WorkerThread::start("v_wl", move |kill_evt| {
2074 Worker::new(
2075 interrupt,
2076 queues.pop_first().unwrap().1,
2077 queues.pop_first().unwrap().1,
2078 wayland_paths,
2079 mapper,
2080 use_transition_flags,
2081 use_send_vfd_v2,
2082 resource_bridge,
2083 #[cfg(feature = "minigbm")]
2084 gralloc,
2085 address_offset,
2086 )
2087 .run(kill_evt)
2088 }));
2089
2090 Ok(())
2091 }
2092
2093 fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
2094 Some(SharedMemoryRegion {
2095 id: WL_SHMEM_ID,
2096 length: WL_SHMEM_SIZE,
2097 })
2098 }
2099
2100 fn set_shared_memory_region_base(&mut self, shmem_base: GuestAddress) {
2101 self.address_offset = Some(shmem_base.0);
2102 }
2103
2104 fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
2105 self.mapper = Some(mapper);
2106 }
2107
2108 fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
2109 if let Some(worker_thread) = self.worker_thread.take() {
2110 let queues = worker_thread.stop()?;
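// The worker hands the queues back in the order it received them, so index 0 is the `in`
// queue and index 1 is the `out` queue, matching what activate() expects on wake.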
2111 return Ok(Some(BTreeMap::from_iter(queues.into_iter().enumerate())));
2112 }
2113 Ok(None)
2114 }
2115
2116 fn virtio_wake(
2117 &mut self,
2118 device_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
2119 ) -> anyhow::Result<()> {
2120 match device_state {
2121 None => Ok(()),
2122 Some((mem, interrupt, queues)) => {
2123 // TODO: activate is just what we want at the moment, but we should probably move
2124 // it into a "start workers" function to make it obvious that it isn't strictly
2125 // used for activate events.
2126 self.activate(mem, interrupt, queues)?;
2127 Ok(())
2128 }
2129 }
2130 }
2131
2132 // ANDROID: Add empty implementations for successful snapshot taking. Change to full
2133 // implementation as part of b/266514618
2134 // virtio-wl is not used, but is created. As such, virtio_snapshot/restore will be called when
2135 // cuttlefish attempts to take a snapshot.
2136 fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
2137 Ok(serde_json::Value::Null)
2138 }
2139
2140 fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
2141 anyhow::ensure!(
2142 data == serde_json::Value::Null,
2143 "unexpected snapshot data: should be null, got {}",
2144 data,
2145 );
2146 Ok(())
2147 }
2148 }
2149