// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! This module implements the virtio wayland device used by the guest to access the host's
//! wayland server.
//!
//! The virtio wayland protocol is carried over two queues: `in` and `out`. The `in` queue is used
//! for sending commands to the guest that are generated by the host, usually messages from the
//! wayland server. The `out` queue is for commands from the guest, usually requests to allocate
//! shared memory, open a wayland server connection, or send data over an existing connection.
//!
//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
//! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
//! or a unix domain socket to the wayland server. In the shared memory case, there is also an
//! associated slot that indicates which hypervisor memory slot the memory is installed into, as
//! well as a page frame number that the guest can access the memory from.
//!
//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
//! `WlResp` for responses.
//!
//! There is one `WlState` instance that contains every known vfd and the current state of the `in`
//! queue. The `in` queue requires extra state to buffer messages to the guest in case the `in`
//! queue is already full. The `WlState` also has a control socket necessary to fulfill certain
//! requests, such as those registering guest memory.
//!
//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
//! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
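//!
//! As a minimal sketch of the `out` queue round trip (illustrative, not normative): the guest
//! writes a `CtrlVfdNew` with `hdr.type_ = VIRTIO_WL_CMD_VFD_NEW` and its chosen `id` and `size`;
//! the device replies in the same descriptor chain with `hdr.type_ = VIRTIO_WL_RESP_VFD_NEW`,
//! echoing the `id` and filling in the guest `pfn` where the shared memory was mapped.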

use std::collections::btree_map::Entry;
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::convert::From;
use std::error::Error as StdError;
use std::fmt::{self, Display};
use std::fs::File;
use std::io::{self, Read, Seek, SeekFrom, Write};
use std::mem::size_of;
#[cfg(feature = "minigbm")]
use std::os::raw::{c_uint, c_ulonglong};
use std::os::unix::net::UnixStream;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::result;
use std::thread;
use std::time::Duration;

#[cfg(feature = "minigbm")]
use libc::{EBADF, EINVAL};

use data_model::*;

use base::{
    error, pipe, round_up_to_page_size, warn, AsRawDescriptor, Error, Event, FileFlags,
    FromRawDescriptor, PollToken, RawDescriptor, Result, ScmSocket, SharedMemory, SharedMemoryUnix,
    Tube, TubeError, WaitContext,
};
#[cfg(feature = "minigbm")]
use base::{ioctl_iow_nr, ioctl_with_ref};
#[cfg(feature = "gpu")]
use base::{IntoRawDescriptor, SafeDescriptor};
use vm_memory::{GuestMemory, GuestMemoryError};

#[cfg(feature = "minigbm")]
use vm_control::GpuMemoryDesc;

use super::resource_bridge::{
    get_resource_info, BufferInfo, ResourceBridgeError, ResourceInfo, ResourceRequest,
};
use super::{
    DescriptorChain, Interrupt, Queue, Reader, SignalableInterrupt, VirtioDevice, Writer, TYPE_WL,
};
use vm_control::{MemSlot, VmMemoryRequest, VmMemoryResponse};
const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
const VIRTIO_WL_RESP_ERR: u32 = 4352;
const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
const VIRTIO_WL_VFD_READ: u32 = 0x2;
const VIRTIO_WL_VFD_MAP: u32 = 0x2;
const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
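// Note: VIRTIO_WL_VFD_MAP shares the value 0x2 with VIRTIO_WL_VFD_READ. The two belong to
// disjoint flag sets: with VIRTIO_WL_F_TRANS_FLAGS negotiated, vfd flags use the WRITE/READ pair;
// otherwise they use WRITE/MAP/CONTROL (see `WlVfd::flags`).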
const VIRTIO_WL_F_TRANS_FLAGS: u32 = 0x01;
const VIRTIO_WL_F_SEND_FENCES: u32 = 0x02;

const QUEUE_SIZE: u16 = 16;
const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];

const NEXT_VFD_ID_BASE: u32 = 0x40000000;
const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
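// Ids chosen by the guest must have the VFD_ID_HOST_MASK bit clear; ids allocated by the host
// start at NEXT_VFD_ID_BASE and have it set, so the two id namespaces never collide.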
// Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
// number of allocs.
const IN_BUFFER_LEN: usize =
    0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();
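// Note: with the 16-byte `CtrlVfdRecv` header and 28 * 4 bytes of vfd ids, this works out to
// 0x1000 - 16 - 112 = 3968 bytes of payload per in-vq buffer.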

#[cfg(feature = "minigbm")]
const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;

#[cfg(feature = "minigbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;

#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone)]
struct dma_buf_sync {
    flags: c_ulonglong,
}

#[cfg(feature = "minigbm")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);

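// Kinds of vfd ids that can accompany a VFD_SEND_FOREIGN_ID command: a local vfd id, a virtgpu
// resource id, a virtgpu fence sequence number, or an already-signaled virtgpu fence.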
const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;

fn encode_vfd_new(
    writer: &mut Writer,
    resp: bool,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
) -> WlResult<()> {
    let ctrl_vfd_new = CtrlVfdNew {
        hdr: CtrlHeader {
            type_: Le32::from(if resp {
                VIRTIO_WL_RESP_VFD_NEW
            } else {
                VIRTIO_WL_CMD_VFD_NEW
            }),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
    };

    writer
        .write_obj(ctrl_vfd_new)
        .map_err(WlError::WriteResponse)
}

#[cfg(feature = "minigbm")]
fn encode_vfd_new_dmabuf(
    writer: &mut Writer,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
    desc: GpuMemoryDesc,
) -> WlResult<()> {
    let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
        width: Le32::from(0),
        height: Le32::from(0),
        format: Le32::from(0),
        stride0: Le32::from(desc.planes[0].stride),
        stride1: Le32::from(desc.planes[1].stride),
        stride2: Le32::from(desc.planes[2].stride),
        offset0: Le32::from(desc.planes[0].offset),
        offset1: Le32::from(desc.planes[1].offset),
        offset2: Le32::from(desc.planes[2].offset),
    };

    writer
        .write_obj(ctrl_vfd_new_dmabuf)
        .map_err(WlError::WriteResponse)
}

fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
    let ctrl_vfd_recv = CtrlVfdRecv {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        vfd_count: Le32::from(vfd_ids.len() as u32),
    };
    writer
        .write_obj(ctrl_vfd_recv)
        .map_err(WlError::WriteResponse)?;

    for &recv_vfd_id in vfd_ids.iter() {
        writer
            .write_obj(Le32::from(recv_vfd_id))
            .map_err(WlError::WriteResponse)?;
    }

    writer.write_all(data).map_err(WlError::WriteResponse)
}

fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
    let ctrl_vfd_new = CtrlVfd {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
    };

    writer
        .write_obj(ctrl_vfd_new)
        .map_err(WlError::WriteResponse)
}

fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
    match resp {
        WlResp::VfdNew {
            id,
            flags,
            pfn,
            size,
            resp,
        } => encode_vfd_new(writer, resp, id, flags, pfn, size),
        #[cfg(feature = "minigbm")]
        WlResp::VfdNewDmabuf {
            id,
            flags,
            pfn,
            size,
            desc,
        } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
        WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
        WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
        r => writer
            .write_obj(Le32::from(r.get_code()))
            .map_err(WlError::WriteResponse),
    }
}

#[allow(dead_code)]
#[derive(Debug)]
enum WlError {
    NewAlloc(Error),
    NewPipe(Error),
    SocketConnect(io::Error),
    SocketNonBlock(io::Error),
    VmControl(TubeError),
    VmBadResponse,
    CheckedOffset,
    ParseDesc(io::Error),
    GuestMemory(GuestMemoryError),
    VolatileMemory(VolatileMemoryError),
    SendVfd(Error),
    WritePipe(io::Error),
    RecvVfd(Error),
    ReadPipe(io::Error),
    WaitContextAdd(Error),
    DmabufSync(io::Error),
    FromSharedMemory(Error),
    WriteResponse(io::Error),
    InvalidString(std::str::Utf8Error),
    UnknownSocketName(String),
}

impl Display for WlError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::WlError::*;

        match self {
            NewAlloc(e) => write!(f, "failed to create shared memory allocation: {}", e),
            NewPipe(e) => write!(f, "failed to create pipe: {}", e),
            SocketConnect(e) => write!(f, "failed to connect socket: {}", e),
            SocketNonBlock(e) => write!(f, "failed to set socket as non-blocking: {}", e),
            VmControl(e) => write!(f, "failed to control parent VM: {}", e),
            VmBadResponse => write!(f, "invalid response from parent VM"),
            CheckedOffset => write!(f, "overflow in calculation"),
            ParseDesc(e) => write!(f, "error parsing descriptor: {}", e),
            GuestMemory(e) => write!(f, "access violation in guest memory: {}", e),
            VolatileMemory(e) => write!(f, "access violation in guest volatile memory: {}", e),
            SendVfd(e) => write!(f, "failed to send on a socket: {}", e),
            WritePipe(e) => write!(f, "failed to write to a pipe: {}", e),
            RecvVfd(e) => write!(f, "failed to recv on a socket: {}", e),
            ReadPipe(e) => write!(f, "failed to read a pipe: {}", e),
            WaitContextAdd(e) => write!(f, "failed to listen to descriptor on wait context: {}", e),
            DmabufSync(e) => write!(f, "failed to synchronize DMABuf access: {}", e),
            FromSharedMemory(e) => {
                write!(f, "failed to create shared memory from descriptor: {}", e)
            }
            WriteResponse(e) => write!(f, "failed to write response: {}", e),
            InvalidString(e) => write!(f, "invalid string: {}", e),
            UnknownSocketName(name) => write!(f, "unknown socket name: {}", name),
        }
    }
}

impl std::error::Error for WlError {}

type WlResult<T> = result::Result<T, WlError>;

impl From<GuestMemoryError> for WlError {
    fn from(e: GuestMemoryError) -> WlError {
        WlError::GuestMemory(e)
    }
}

impl From<VolatileMemoryError> for WlError {
    fn from(e: VolatileMemoryError) -> WlError {
        WlError::VolatileMemory(e)
    }
}

#[derive(Clone)]
struct VmRequester {
    inner: Rc<Tube>,
}

impl VmRequester {
    fn new(vm_socket: Tube) -> VmRequester {
        VmRequester {
            inner: Rc::new(vm_socket),
        }
    }

    fn request(&self, request: &VmMemoryRequest) -> WlResult<VmMemoryResponse> {
        self.inner.send(&request).map_err(WlError::VmControl)?;
        self.inner.recv().map_err(WlError::VmControl)
    }

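    // Registers `shm` with the VM and hands it back to the caller: `RegisterMemory` takes
    // ownership of the SharedMemory while the request is sent, so the memory is recovered from
    // the request afterwards rather than duplicated.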
    fn register_memory(&self, shm: SharedMemory) -> WlResult<(SharedMemory, VmMemoryResponse)> {
        let request = VmMemoryRequest::RegisterMemory(shm);
        let response = self.request(&request)?;
        match request {
            VmMemoryRequest::RegisterMemory(shm) => Ok((shm, response)),
            _ => unreachable!(),
        }
    }
}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlHeader {
    type_: Le32,
    flags: Le32,
}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdNew {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
}

unsafe impl DataInit for CtrlVfdNew {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdNewCtxNamed {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32, // Ignored.
    pfn: Le64,   // Ignored.
    size: Le32,  // Ignored.
    name: [u8; 32],
}

unsafe impl DataInit for CtrlVfdNewCtxNamed {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
#[cfg(feature = "minigbm")]
struct CtrlVfdNewDmabuf {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    width: Le32,
    height: Le32,
    format: Le32,
    stride0: Le32,
    stride1: Le32,
    stride2: Le32,
    offset0: Le32,
    offset1: Le32,
    offset2: Le32,
}

#[cfg(feature = "minigbm")]
unsafe impl DataInit for CtrlVfdNewDmabuf {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
#[cfg(feature = "minigbm")]
struct CtrlVfdDmabufSync {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
}

#[cfg(feature = "minigbm")]
unsafe impl DataInit for CtrlVfdDmabufSync {}

#[repr(C)]
#[derive(Copy, Clone)]
struct CtrlVfdRecv {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
}

unsafe impl DataInit for CtrlVfdRecv {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfd {
    hdr: CtrlHeader,
    id: Le32,
}

unsafe impl DataInit for CtrlVfd {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdSend {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
    // Remainder is an array of vfd_count IDs followed by data.
}

unsafe impl DataInit for CtrlVfdSend {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdSendVfd {
    kind: Le32,
    id: Le32,
}

unsafe impl DataInit for CtrlVfdSendVfd {}

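// On the wire, a v2 send vfd is a kind tag followed by either a 32-bit vfd/resource id or a
// 64-bit fence seqno; `kind` determines which union member is valid, as the accessors below
// assert.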
#[repr(C)]
#[derive(Copy, Clone)]
union CtrlVfdSendVfdV2Payload {
    id: Le32,
    seqno: Le64,
}

unsafe impl DataInit for CtrlVfdSendVfdV2Payload {}

#[repr(C)]
#[derive(Copy, Clone)]
struct CtrlVfdSendVfdV2 {
    kind: Le32,
    payload: CtrlVfdSendVfdV2Payload,
}

unsafe impl DataInit for CtrlVfdSendVfdV2 {}

impl CtrlVfdSendVfdV2 {
    fn id(&self) -> Le32 {
        assert!(
            self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
                || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
        );
        unsafe { self.payload.id }
    }
    fn seqno(&self) -> Le64 {
        assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
        unsafe { self.payload.seqno }
    }
}

#[derive(Debug)]
#[allow(dead_code)]
enum WlResp<'a> {
    Ok,
    VfdNew {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        // The VfdNew variant can be either a response or a command depending on this `resp`. This
        // is important for the `get_code` method.
        resp: bool,
    },
    #[cfg(feature = "minigbm")]
    VfdNewDmabuf {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        desc: GpuMemoryDesc,
    },
    VfdRecv {
        id: u32,
        data: &'a [u8],
        vfds: &'a [u32],
    },
    VfdHup {
        id: u32,
    },
    Err(Box<dyn StdError>),
    OutOfMemory,
    InvalidId,
    InvalidType,
    InvalidFlags,
    InvalidCommand,
}

impl<'a> WlResp<'a> {
    fn get_code(&self) -> u32 {
        match *self {
            WlResp::Ok => VIRTIO_WL_RESP_OK,
            WlResp::VfdNew { resp, .. } => {
                if resp {
                    VIRTIO_WL_RESP_VFD_NEW
                } else {
                    VIRTIO_WL_CMD_VFD_NEW
                }
            }
            #[cfg(feature = "minigbm")]
            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
        }
    }
}

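// A virtual file descriptor. Depending on how it was created, one of `socket`,
// `guest_shared_memory`, or the pipe fields is populated; the rest stay `None`.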
#[derive(Default)]
struct WlVfd {
    socket: Option<UnixStream>,
    guest_shared_memory: Option<SharedMemory>,
    remote_pipe: Option<File>,
    local_pipe: Option<(u32 /* flags */, File)>,
    slot: Option<(MemSlot, u64 /* pfn */, VmRequester)>,
    #[cfg(feature = "minigbm")]
    is_dmabuf: bool,
}

impl fmt::Debug for WlVfd {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "WlVfd {{")?;
        if let Some(s) = &self.socket {
            write!(f, " socket: {}", s.as_raw_descriptor())?;
        }
        if let Some((slot, pfn, _)) = &self.slot {
            write!(f, " slot: {} pfn: {}", slot, pfn)?;
        }
        if let Some(s) = &self.remote_pipe {
            write!(f, " remote: {}", s.as_raw_descriptor())?;
        }
        if let Some((_, s)) = &self.local_pipe {
            write!(f, " local: {}", s.as_raw_descriptor())?;
        }
        write!(f, " }}")
    }
}

impl WlVfd {
    fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
        let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
        let mut vfd = WlVfd::default();
        vfd.socket = Some(socket);
        Ok(vfd)
    }

    fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
        let size_page_aligned = round_up_to_page_size(size as usize) as u64;
        let vfd_shm =
            SharedMemory::named("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;

        let register_request = VmMemoryRequest::RegisterMemory(vfd_shm);
        let register_response = vm.request(&register_request)?;
        match register_response {
            VmMemoryResponse::RegisterMemory { pfn, slot } => {
                let mut vfd = WlVfd::default();
                let vfd_shm = match register_request {
                    VmMemoryRequest::RegisterMemory(shm) => shm,
                    _ => unreachable!(),
                };
                vfd.guest_shared_memory = Some(vfd_shm);
                vfd.slot = Some((slot, pfn, vm));
                Ok(vfd)
            }
            _ => Err(WlError::VmBadResponse),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf(
        vm: VmRequester,
        width: u32,
        height: u32,
        format: u32,
    ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
        let allocate_and_register_gpu_memory_response =
            vm.request(&VmMemoryRequest::AllocateAndRegisterGpuMemory {
                width,
                height,
                format,
            })?;
        match allocate_and_register_gpu_memory_response {
            VmMemoryResponse::AllocateAndRegisterGpuMemory {
                descriptor,
                pfn,
                slot,
                desc,
            } => {
                let mut vfd = WlVfd::default();
                let vfd_shm =
                    SharedMemory::from_safe_descriptor(descriptor).map_err(WlError::NewAlloc)?;
                vfd.guest_shared_memory = Some(vfd_shm);
                vfd.slot = Some((slot, pfn, vm));
                vfd.is_dmabuf = true;
                Ok((vfd, desc))
            }
            _ => Err(WlError::VmBadResponse),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
        if !self.is_dmabuf {
            return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
        }

        match &self.guest_shared_memory {
            Some(descriptor) => {
                let sync = dma_buf_sync {
                    flags: flags as u64,
                };
                // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
                if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
                    Err(WlError::DmabufSync(io::Error::last_os_error()))
                } else {
                    Ok(())
                }
            }
            None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
        }
    }

    fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
        let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
        let mut vfd = WlVfd::default();
        vfd.remote_pipe = Some(read_pipe);
        vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
        Ok(vfd)
    }

    fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
        let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
        let mut vfd = WlVfd::default();
        vfd.remote_pipe = Some(write_pipe);
        vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
        Ok(vfd)
    }

    fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
        // We need to determine if the given file is more like shared memory or a pipe/socket. A
        // quick and easy check is to seek to the end of the file. If it works we assume it's not a
        // pipe/socket because those have no end. We can even use that seek location as an indicator
        // for how big the shared memory chunk to map into guest memory is. If seeking to the end
        // fails, we assume it's a socket or pipe with read/write semantics.
        match descriptor.seek(SeekFrom::End(0)) {
            Ok(_) => {
                let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
                let (shm, register_response) = vm.register_memory(shm)?;

                match register_response {
                    VmMemoryResponse::RegisterMemory { pfn, slot } => {
                        let mut vfd = WlVfd::default();
                        vfd.guest_shared_memory = Some(shm);
                        vfd.slot = Some((slot, pfn, vm));
                        Ok(vfd)
                    }
                    _ => Err(WlError::VmBadResponse),
                }
            }
            _ => {
                let flags = match FileFlags::from_file(&descriptor) {
                    Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
                    Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
                    Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
                    _ => 0,
                };
                let mut vfd = WlVfd::default();
                vfd.local_pipe = Some((flags, descriptor));
                Ok(vfd)
            }
        }
    }

    fn flags(&self, use_transition_flags: bool) -> u32 {
        let mut flags = 0;
        if use_transition_flags {
            if self.socket.is_some() {
                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
            }
            if let Some((f, _)) = self.local_pipe {
                flags |= f;
            }
        } else {
            if self.socket.is_some() {
                flags |= VIRTIO_WL_VFD_CONTROL;
            }
            if self.slot.is_some() {
                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP;
            }
        }
        flags
    }

    // Page frame number in the guest where this VFD was mapped.
    fn pfn(&self) -> Option<u64> {
        self.slot.as_ref().map(|s| s.1)
    }

    // Size in bytes of the shared memory VFD.
    fn size(&self) -> Option<u64> {
        self.guest_shared_memory.as_ref().map(|shm| shm.size())
    }

    // The FD that gets sent if this VFD is sent over a socket.
    fn send_descriptor(&self) -> Option<RawDescriptor> {
        self.guest_shared_memory
            .as_ref()
            .map(|shm| shm.as_raw_descriptor())
            .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
            .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
    }

    // The FD that is used for polling for events on this VFD.
    fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
        self.socket
            .as_ref()
            .map(|s| s as &dyn AsRawDescriptor)
            .or_else(|| {
                self.local_pipe
                    .as_ref()
                    .map(|(_, p)| p as &dyn AsRawDescriptor)
            })
    }

    // Sends data/files from the guest to the host over this VFD.
    fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
        if let Some(socket) = &self.socket {
            socket
                .send_with_fds(&data.get_remaining(), rds)
                .map_err(WlError::SendVfd)?;
            // All remaining data in `data` is now considered consumed.
            data.consume(::std::usize::MAX);
            Ok(WlResp::Ok)
        } else if let Some((_, local_pipe)) = &mut self.local_pipe {
            // Impossible to send descriptors over a simple pipe.
            if !rds.is_empty() {
                return Ok(WlResp::InvalidType);
            }
            data.read_to(local_pipe, usize::max_value())
                .map_err(WlError::WritePipe)?;
            Ok(WlResp::Ok)
        } else {
            Ok(WlResp::InvalidType)
        }
    }

    // Receives data/files from the host for this VFD and queues it for the guest.
    fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
        if let Some(socket) = self.socket.take() {
            let mut buf = vec![0; IN_BUFFER_LEN];
            let mut fd_buf = [0; VIRTWL_SEND_MAX_ALLOCS];
            // If any errors happen, the socket will get dropped, preventing more reading.
            let (len, file_count) = socket
                .recv_with_fds(&mut buf[..], &mut fd_buf)
                .map_err(WlError::RecvVfd)?;
            // If any data was read, put the socket back for future recv operations.
            if len != 0 || file_count != 0 {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.socket = Some(socket);
                // Safe because the first file_count fds from recv_with_fds are owned by us and
                // valid.
                in_file_queue.extend(
                    fd_buf[..file_count]
                        .iter()
                        .map(|&descriptor| unsafe { File::from_raw_descriptor(descriptor) }),
                );
                return Ok(buf);
            }
            Ok(Vec::new())
        } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
            let mut buf = Vec::new();
            buf.resize(IN_BUFFER_LEN, 0);
            let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
            if len != 0 {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.local_pipe = Some((flags, local_pipe));
                return Ok(buf);
            }
            Ok(Vec::new())
        } else {
            Ok(Vec::new())
        }
    }

    // Called after this VFD is sent over a socket to ensure the local end of the VFD receives
    // hang-up events.
    fn close_remote(&mut self) {
        self.remote_pipe = None;
    }

    fn close(&mut self) -> WlResult<()> {
        if let Some((slot, _, vm)) = self.slot.take() {
            vm.request(&VmMemoryRequest::UnregisterMemory(slot))?;
        }
        self.socket = None;
        self.remote_pipe = None;
        self.local_pipe = None;
        Ok(())
    }
}

impl Drop for WlVfd {
    fn drop(&mut self) {
        let _ = self.close();
    }
}

#[derive(Debug)]
enum WlRecv {
    Vfd { id: u32 },
    Data { buf: Vec<u8> },
    Hup,
}

struct WlState {
    wayland_paths: Map<String, PathBuf>,
    vm: VmRequester,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    wait_ctx: WaitContext<u32>,
    vfds: Map<u32, WlVfd>,
    next_vfd_id: u32,
    in_file_queue: Vec<File>,
    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
    current_recv_vfd: Option<u32>,
    recv_vfds: Vec<u32>,
    #[cfg(feature = "gpu")]
    signaled_fence: Option<SafeDescriptor>,
    use_send_vfd_v2: bool,
}

impl WlState {
    fn new(
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
    ) -> WlState {
        WlState {
            wayland_paths,
            vm: VmRequester::new(vm_tube),
            resource_bridge,
            wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
            use_transition_flags,
            vfds: Map::new(),
            next_vfd_id: NEXT_VFD_ID_BASE,
            in_file_queue: Vec::new(),
            in_queue: VecDeque::new(),
            current_recv_vfd: None,
            recv_vfds: Vec::new(),
            #[cfg(feature = "gpu")]
            signaled_fence: None,
            use_send_vfd_v2,
        }
    }

    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
                    WlVfd::pipe_remote_read_local_write()?
                } else if flags & VIRTIO_WL_VFD_READ != 0 {
                    WlVfd::pipe_remote_write_local_read()?
                } else {
                    return Ok(WlResp::InvalidFlags);
                };
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                let resp = WlResp::VfdNew {
                    id,
                    flags: 0,
                    pfn: 0,
                    size: 0,
                    resp: true,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if self.use_transition_flags {
            if flags != 0 {
                return Ok(WlResp::InvalidFlags);
            }
        } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
            return Ok(WlResp::Err(Box::from("invalid flags")));
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
                let resp = WlResp::VfdNew {
                    id,
                    flags,
                    pfn: vfd.pfn().unwrap_or_default(),
                    size: vfd.size().unwrap_or_default() as u32,
                    resp: true,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "minigbm")]
    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
                let resp = WlResp::VfdNewDmabuf {
                    id,
                    flags: 0,
                    pfn: vfd.pfn().unwrap_or_default(),
                    size: vfd.size().unwrap_or_default() as u32,
                    desc,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
        if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => {
                vfd.dmabuf_sync(flags)?;
                Ok(WlResp::Ok)
            }
            None => Ok(WlResp::InvalidId),
        }
    }

    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        let flags = if self.use_transition_flags {
            VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
        } else {
            VIRTIO_WL_VFD_CONTROL
        };

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = entry.insert(WlVfd::connect(
                    &self
                        .wayland_paths
                        .get(name)
                        .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
                )?);
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                Ok(WlResp::VfdNew {
                    id,
                    flags,
                    pfn: 0,
                    size: 0,
                    resp: true,
                })
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    fn process_wait_context(&mut self) {
        let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
            Ok(v) => v,
            Err(e) => {
                error!("failed polling for vfd events: {}", e);
                return;
            }
        };

        for event in events.iter().filter(|e| e.is_readable) {
            if let Err(e) = self.recv(event.token) {
                error!("failed to recv from vfd: {}", e)
            }
        }

        for event in events.iter().filter(|e| e.is_hungup) {
            if !event.is_readable {
                let vfd_id = event.token;
                if let Some(descriptor) =
                    self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
                {
                    if let Err(e) = self.wait_ctx.delete(descriptor) {
                        warn!("failed to remove hungup vfd from poll context: {}", e);
                    }
                }
                self.in_queue.push_back((vfd_id, WlRecv::Hup));
            }
        }
    }

    fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
        let mut to_delete = Set::new();
        for (dest_vfd_id, q) in &self.in_queue {
            if *dest_vfd_id == vfd_id {
                if let WlRecv::Vfd { id } = q {
                    to_delete.insert(*id);
                }
            }
        }
        for vfd_id in to_delete {
            // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
            let _ = self.close(vfd_id);
        }
        match self.vfds.remove(&vfd_id) {
            Some(mut vfd) => {
                self.in_queue.retain(|&(id, _)| id != vfd_id);
                vfd.close()?;
                Ok(WlResp::Ok)
            }
            None => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "gpu")]
    fn get_info(&mut self, request: ResourceRequest) -> Option<File> {
        let sock = self.resource_bridge.as_ref().unwrap();
        match get_resource_info(sock, request) {
            Ok(ResourceInfo::Buffer(BufferInfo { file, .. })) => Some(file),
            Ok(ResourceInfo::Fence { file }) => Some(file),
            Err(ResourceBridgeError::InvalidResource(req)) => {
                warn!("attempt to send non-existent gpu resource {}", req);
                None
            }
            Err(e) => {
                error!("{}", e);
                // If there was an error with the resource bridge, it can no longer be
                // trusted to continue to function.
                self.resource_bridge = None;
                None
            }
        }
    }

    fn send(
        &mut self,
        vfd_id: u32,
        vfd_count: usize,
        foreign_id: bool,
        reader: &mut Reader,
    ) -> WlResult<WlResp> {
        // First stage gathers and normalizes all id information from guest memory.
        let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
            kind: Le32::from(0),
            payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
        }; VIRTWL_SEND_MAX_ALLOCS];
        for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
            *vfd_id = if foreign_id {
                if self.use_send_vfd_v2 {
                    reader.read_obj().map_err(WlError::ParseDesc)?
                } else {
                    let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
                    CtrlVfdSendVfdV2 {
                        kind: vfd.kind,
                        payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
                    }
                }
            } else {
                CtrlVfdSendVfdV2 {
                    kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
                    payload: CtrlVfdSendVfdV2Payload {
                        id: reader.read_obj().map_err(WlError::ParseDesc)?,
                    },
                }
            };
        }

        // Next stage collects corresponding file descriptors for each id.
        let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
        #[cfg(feature = "gpu")]
        let mut bridged_files = Vec::new();
        for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
            match send_vfd_id.kind.to_native() {
                VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
                    match self.vfds.get(&send_vfd_id.id().to_native()) {
                        Some(vfd) => match vfd.send_descriptor() {
                            Some(vfd_fd) => *descriptor = vfd_fd,
                            None => return Ok(WlResp::InvalidType),
                        },
                        None => {
                            warn!(
                                "attempt to send non-existent vfd 0x{:08x}",
                                send_vfd_id.id().to_native()
                            );
                            return Ok(WlResp::InvalidId);
                        }
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetBuffer {
                        id: send_vfd_id.id().to_native(),
                    }) {
                        Some(file) => {
                            *descriptor = file.as_raw_descriptor();
                            bridged_files.push(file);
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetFence {
                        seqno: send_vfd_id.seqno().to_native(),
                    }) {
                        Some(file) => {
                            *descriptor = file.as_raw_descriptor();
                            bridged_files.push(file);
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
                    if self.resource_bridge.is_some() =>
                {
                    if self.signaled_fence.is_none() {
                        // If the guest is sending a signaled fence, we know a fence
                        // with seqno 0 must already be signaled.
                        match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
                            Some(file) => {
                                // Safe since get_info returned a valid File.
                                let safe_descriptor = unsafe {
                                    SafeDescriptor::from_raw_descriptor(file.into_raw_descriptor())
                                };
                                self.signaled_fence = Some(safe_descriptor)
                            }
                            None => return Ok(WlResp::InvalidId),
                        }
                    }
                    match self.signaled_fence.as_ref().unwrap().try_clone() {
                        Ok(dup) => {
                            *descriptor = dup.into_raw_descriptor();
                            // Safe because the fd comes from a valid SafeDescriptor.
                            let file = unsafe { File::from_raw_descriptor(*descriptor) };
                            bridged_files.push(file);
                        }
                        Err(_) => return Ok(WlResp::InvalidId),
                    }
                }
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
                    let _ = self.resource_bridge.as_ref();
                    warn!("attempt to send foreign resource kind but feature is disabled");
                }
                kind => {
                    warn!("attempt to send unknown foreign resource kind: {}", kind);
                    return Ok(WlResp::InvalidId);
                }
            }
        }

        // Final stage sends file descriptors and data to the target vfd's socket.
        match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
                WlResp::Ok => {}
                _ => return Ok(WlResp::InvalidType),
            },
            None => return Ok(WlResp::InvalidId),
        }
        // The vfds with remote FDs need to be closed so that the local side can receive
        // hangup events.
        for &send_vfd_id in &send_vfd_ids[..vfd_count] {
            if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
                if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
                    vfd.close_remote();
                }
            }
        }
        Ok(WlResp::Ok)
    }

    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
        let buf = match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => vfd.recv(&mut self.in_file_queue)?,
            None => return Ok(()),
        };
        if self.in_file_queue.is_empty() && buf.is_empty() {
            self.in_queue.push_back((vfd_id, WlRecv::Hup));
            return Ok(());
        }
        for file in self.in_file_queue.drain(..) {
            let vfd = WlVfd::from_file(self.vm.clone(), file)?;
            if let Some(wait_descriptor) = vfd.wait_descriptor() {
                self.wait_ctx
                    .add(wait_descriptor, self.next_vfd_id)
                    .map_err(WlError::WaitContextAdd)?;
            }
            self.vfds.insert(self.next_vfd_id, vfd);
            self.in_queue.push_back((
                vfd_id,
                WlRecv::Vfd {
                    id: self.next_vfd_id,
                },
            ));
            self.next_vfd_id += 1;
        }
        self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));

        Ok(())
    }

    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
        let type_ = {
            let mut type_reader = reader.clone();
            type_reader.read_obj::<Le32>().map_err(WlError::ParseDesc)?
        };
        match type_.into() {
            VIRTIO_WL_CMD_VFD_NEW => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
            }
            VIRTIO_WL_CMD_VFD_CLOSE => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.close(ctrl.id.into())
            }
            VIRTIO_WL_CMD_VFD_SEND => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = false;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            #[cfg(feature = "gpu")]
            VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = true;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.new_context(ctrl.id.into(), "")
            }
            VIRTIO_WL_CMD_VFD_NEW_PIPE => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_pipe(ctrl.id.into(), ctrl.flags.into())
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewDmabuf>()
                    .map_err(WlError::ParseDesc)?;
                self.new_dmabuf(
                    ctrl.id.into(),
                    ctrl.width.into(),
                    ctrl.height.into(),
                    ctrl.format.into(),
                )
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
                let ctrl = reader
                    .read_obj::<CtrlVfdDmabufSync>()
                    .map_err(WlError::ParseDesc)?;
                self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewCtxNamed>()
                    .map_err(WlError::ParseDesc)?;
                let name_len = ctrl
                    .name
                    .iter()
                    .position(|x| x == &0)
                    .unwrap_or(ctrl.name.len());
                let name =
                    std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
                self.new_context(ctrl.id.into(), name)
            }
            op_type => {
                warn!("unexpected command {}", op_type);
                Ok(WlResp::InvalidCommand)
            }
        }
    }

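    // Computes the next message for the guest's `in` queue without dequeueing it. Vfd
    // announcements (`VfdNew`) for one recipient are accumulated into `recv_vfds` by `pop_recv`
    // until the data that references them arrives, so a single `VfdRecv` can carry both the data
    // and the batched vfd ids.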
    fn next_recv(&self) -> Option<WlResp> {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        match self.vfds.get(&id) {
                            Some(vfd) => Some(WlResp::VfdNew {
                                id,
                                flags: vfd.flags(self.use_transition_flags),
                                pfn: vfd.pfn().unwrap_or_default(),
                                size: vfd.size().unwrap_or_default() as u32,
                                resp: false,
                            }),
                            _ => Some(WlResp::VfdNew {
                                id,
                                flags: 0,
                                pfn: 0,
                                size: 0,
                                resp: false,
                            }),
                        }
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Data { ref buf }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        Some(WlResp::VfdRecv {
                            id: vfd_id,
                            data: &buf[..],
                            vfds: &self.recv_vfds[..],
                        })
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
            }
        } else {
            None
        }
    }

    fn pop_recv(&mut self) {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        self.recv_vfds.push(id);
                        self.current_recv_vfd = Some(vfd_id);
                    } else {
                        self.recv_vfds.clear();
                        self.current_recv_vfd = None;
                        return;
                    }
                }
                (vfd_id, WlRecv::Data { .. }) => {
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
                    if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
                        return;
                    }
                }
                (_, WlRecv::Hup) => {
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
                }
            }
        }
        self.in_queue.pop_front();
    }
}

struct Worker {
    interrupt: Interrupt,
    mem: GuestMemory,
    in_queue: Queue,
    out_queue: Queue,
    state: WlState,
}

impl Worker {
    fn new(
        mem: GuestMemory,
        interrupt: Interrupt,
        in_queue: Queue,
        out_queue: Queue,
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
    ) -> Worker {
        Worker {
            interrupt,
            mem,
            in_queue,
            out_queue,
            state: WlState::new(
                wayland_paths,
                vm_tube,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
            ),
        }
    }

    fn run(&mut self, mut queue_evts: Vec<Event>, kill_evt: Event) {
        let mut in_desc_chains: VecDeque<DescriptorChain> =
            VecDeque::with_capacity(QUEUE_SIZE as usize);
        let in_queue_evt = queue_evts.remove(0);
        let out_queue_evt = queue_evts.remove(0);
        #[derive(PollToken)]
        enum Token {
            InQueue,
            OutQueue,
            Kill,
            State,
            InterruptResample,
        }

        let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
            (&in_queue_evt, Token::InQueue),
            (&out_queue_evt, Token::OutQueue),
            (&kill_evt, Token::Kill),
            (&self.state.wait_ctx, Token::State),
        ]) {
            Ok(pc) => pc,
            Err(e) => {
                error!("failed creating WaitContext: {}", e);
                return;
            }
        };
        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            if wait_ctx
                .add(resample_evt, Token::InterruptResample)
                .is_err()
            {
                error!("failed adding resample event to WaitContext.");
                return;
            }
        }

        'wait: loop {
            let mut signal_used_in = false;
            let mut signal_used_out = false;
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };

            for event in &events {
                match event.token {
                    Token::InQueue => {
                        let _ = in_queue_evt.read();
                        // Used to buffer descriptor indexes that are invalid for our uses.
                        let mut rejects = [0u16; QUEUE_SIZE as usize];
                        let mut rejects_len = 0;
                        let min_in_desc_len = (size_of::<CtrlVfdRecv>()
                            + size_of::<Le32>() * VIRTWL_SEND_MAX_ALLOCS)
                            as u32;
                        in_desc_chains.extend(self.in_queue.iter(&self.mem).filter(|d| {
                            if d.len >= min_in_desc_len && d.is_write_only() {
                                true
                            } else {
                                // Cannot use queue.add_used directly because it's being borrowed
                                // for the iterator chain, so we buffer the descriptor index in
                                // rejects.
                                rejects[rejects_len] = d.index;
                                rejects_len += 1;
                                false
                            }
                        }));
                        for &reject in &rejects[..rejects_len] {
                            signal_used_in = true;
                            self.in_queue.add_used(&self.mem, reject, 0);
                        }
                    }
                    Token::OutQueue => {
                        let _ = out_queue_evt.read();
                        while let Some(desc) = self.out_queue.pop(&self.mem) {
                            let desc_index = desc.index;
                            match (
                                Reader::new(self.mem.clone(), desc.clone()),
                                Writer::new(self.mem.clone(), desc),
                            ) {
                                (Ok(mut reader), Ok(mut writer)) => {
                                    let resp = match self.state.execute(&mut reader) {
                                        Ok(r) => r,
                                        Err(e) => WlResp::Err(Box::new(e)),
                                    };

                                    match encode_resp(&mut writer, resp) {
                                        Ok(()) => {}
                                        Err(e) => {
                                            error!(
                                                "failed to encode response to descriptor chain: {}",
                                                e
                                            );
                                        }
                                    }

                                    self.out_queue.add_used(
                                        &self.mem,
                                        desc_index,
                                        writer.bytes_written() as u32,
                                    );
                                    signal_used_out = true;
                                }
                                (_, Err(e)) | (Err(e), _) => {
                                    error!("invalid descriptor: {}", e);
                                    self.out_queue.add_used(&self.mem, desc_index, 0);
                                    signal_used_out = true;
                                }
                            }
                        }
                    }
                    Token::Kill => break 'wait,
                    Token::State => self.state.process_wait_context(),
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                }
            }

            // This loop runs after the event responses because it should be retried whenever the
            // `in` queue becomes usable again or one of the VFDs is read.
            while !in_desc_chains.is_empty() {
                let mut should_pop = false;
                if let Some(in_resp) = self.state.next_recv() {
                    // in_desc_chains is not empty (checked by loop condition) so unwrap is safe.
                    let desc = in_desc_chains.pop_front().unwrap();
                    let index = desc.index;
                    match Writer::new(self.mem.clone(), desc) {
                        Ok(mut writer) => {
                            match encode_resp(&mut writer, in_resp) {
                                Ok(()) => {
                                    should_pop = true;
                                }
                                Err(e) => {
                                    error!("failed to encode response to descriptor chain: {}", e);
                                }
                            };
                            signal_used_in = true;
                            self.in_queue
                                .add_used(&self.mem, index, writer.bytes_written() as u32);
                        }
                        Err(e) => {
                            error!("invalid descriptor: {}", e);
                            self.in_queue.add_used(&self.mem, index, 0);
                            signal_used_in = true;
                        }
                    }
                } else {
                    break;
                }
                if should_pop {
                    self.state.pop_recv();
                }
            }

            if signal_used_in {
                self.interrupt.signal_used_queue(self.in_queue.vector);
            }

            if signal_used_out {
                self.interrupt.signal_used_queue(self.out_queue.vector);
            }
        }
    }
}

pub struct Wl {
    kill_evt: Option<Event>,
    worker_thread: Option<thread::JoinHandle<()>>,
    wayland_paths: Map<String, PathBuf>,
    vm_socket: Option<Tube>,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    use_send_vfd_v2: bool,
    base_features: u64,
}

impl Wl {
    pub fn new(
        base_features: u64,
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        resource_bridge: Option<Tube>,
    ) -> Result<Wl> {
        Ok(Wl {
            kill_evt: None,
            worker_thread: None,
            wayland_paths,
            vm_socket: Some(vm_tube),
            resource_bridge,
            use_transition_flags: false,
            use_send_vfd_v2: false,
            base_features,
        })
    }
}

impl Drop for Wl {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(worker_thread) = self.worker_thread.take() {
            let _ = worker_thread.join();
        }
    }
}

impl VirtioDevice for Wl {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        if let Some(vm_socket) = &self.vm_socket {
            keep_rds.push(vm_socket.as_raw_descriptor());
        }
        if let Some(resource_bridge) = &self.resource_bridge {
            keep_rds.push(resource_bridge.as_raw_descriptor());
        }

        keep_rds
    }

    fn device_type(&self) -> u32 {
        TYPE_WL
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.base_features | 1 << VIRTIO_WL_F_TRANS_FLAGS | 1 << VIRTIO_WL_F_SEND_FENCES
    }

    fn ack_features(&mut self, value: u64) {
        if value & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0 {
            self.use_transition_flags = true;
        }
        if value & (1 << VIRTIO_WL_F_SEND_FENCES) != 0 {
            self.use_send_vfd_v2 = true;
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<Queue>,
        queue_evts: Vec<Event>,
    ) {
        if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
            return;
        }

        let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
            Ok(v) => v,
            Err(e) => {
                error!("failed creating kill Event pair: {}", e);
                return;
            }
        };
        self.kill_evt = Some(self_kill_evt);

        if let Some(vm_socket) = self.vm_socket.take() {
            let wayland_paths = self.wayland_paths.clone();
            let use_transition_flags = self.use_transition_flags;
            let use_send_vfd_v2 = self.use_send_vfd_v2;
            let resource_bridge = self.resource_bridge.take();
            let worker_result =
                thread::Builder::new()
                    .name("virtio_wl".to_string())
                    .spawn(move || {
                        Worker::new(
                            mem,
                            interrupt,
                            queues.remove(0),
                            queues.remove(0),
                            wayland_paths,
                            vm_socket,
                            use_transition_flags,
                            use_send_vfd_v2,
                            resource_bridge,
                        )
                        .run(queue_evts, kill_evt);
                    });

            match worker_result {
                Err(e) => {
                    error!("failed to spawn virtio_wl worker: {}", e);
                    return;
                }
                Ok(join_handle) => {
                    self.worker_thread = Some(join_handle);
                }
            }
        }
    }
}