// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! This module implements the virtio wayland device used by the guest to access the host's
//! wayland server.
//!
//! The virtio wayland protocol is done over two queues: `in` and `out`. The `in` queue is used for
//! sending commands to the guest that are generated by the host, usually messages from the wayland
//! server. The `out` queue is for commands from the guest, usually requests to allocate shared
//! memory, open a wayland server connection, or send data over an existing connection.
//!
//! Each `WlVfd` represents one virtual file descriptor created by either the guest or the host.
//! Virtual file descriptors contain actual file descriptors, either a shared memory file descriptor
//! or a unix domain socket to the wayland server. In the shared memory case, there is also an
//! associated slot that indicates which hypervisor memory slot the memory is installed into, as
//! well as a page frame number that the guest can access the memory from.
//!
//! The types starting with `Ctrl` are structures representing the virtio wayland protocol "on the
//! wire." They are decoded and executed in the `execute` function and encoded as some variant of
//! `WlResp` for responses.
//!
//! There is one `WlState` instance that contains every known vfd and the current state of the
//! `in` queue. The `in` queue requires extra state to buffer messages to the guest in case the
//! `in` queue is already full. The `WlState` also has a control socket necessary to fulfill
//! certain requests, such as those registering guest memory.
//!
//! The `Worker` is responsible for the poll loop over all possible events, encoding/decoding from
//! the virtio queue, and routing messages in and out of `WlState`. Possible events include the kill
//! event, available descriptors on the `in` or `out` queue, and incoming data on any vfd's socket.
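//!
//! As a sketch (not a doctest; these types are private to this module), a guest request to
//! allocate shared memory travels the `out` queue as a `CtrlVfdNew` command and is answered in
//! place with a `CtrlVfdNew` response carrying the page frame number of the new mapping:
//!
//! ```text
//! guest -> host (out queue)              host -> guest (same descriptor chain)
//! CtrlVfdNew {                           CtrlVfdNew {
//!     hdr.type_: VIRTIO_WL_CMD_VFD_NEW,      hdr.type_: VIRTIO_WL_RESP_VFD_NEW,
//!     id, flags, size,                       id, flags, size: <page-aligned size>,
//!     pfn: 0,                                pfn: <guest pfn of the mapping>,
//! }                                      }
//! ```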

use std::collections::btree_map::Entry;
use std::collections::{BTreeMap as Map, BTreeSet as Set, VecDeque};
use std::convert::From;
use std::error::Error as StdError;
use std::fmt;
use std::fs::File;
use std::io::{self, IoSliceMut, Read, Seek, SeekFrom, Write};
use std::mem::size_of;
#[cfg(feature = "minigbm")]
use std::os::raw::{c_uint, c_ulonglong};
use std::os::unix::net::UnixStream;
use std::path::{Path, PathBuf};
use std::rc::Rc;
use std::result;
use std::thread;
use std::time::Duration;

#[cfg(feature = "minigbm")]
use libc::{EBADF, EINVAL};

use data_model::*;

#[cfg(feature = "minigbm")]
use base::ioctl_iow_nr;
use base::{
    error, ioctl_iowr_nr, ioctl_with_ref, pipe, round_up_to_page_size, warn, AsRawDescriptor,
    Error, Event, EventType, FileFlags, FromRawDescriptor, PollToken, RawDescriptor, Result,
    ScmSocket, SharedMemory, SharedMemoryUnix, Tube, TubeError, WaitContext,
};
#[cfg(feature = "gpu")]
use base::{IntoRawDescriptor, SafeDescriptor};
use remain::sorted;
use thiserror::Error as ThisError;
use vm_memory::{GuestMemory, GuestMemoryError};

#[cfg(feature = "minigbm")]
use vm_control::GpuMemoryDesc;

#[cfg(feature = "gpu")]
use super::resource_bridge::{
    get_resource_info, BufferInfo, ResourceBridgeError, ResourceInfo, ResourceRequest,
};
use super::{Interrupt, Queue, Reader, SignalableInterrupt, VirtioDevice, Writer, TYPE_WL};
use vm_control::{MemSlot, VmMemoryDestination, VmMemoryRequest, VmMemoryResponse, VmMemorySource};

const VIRTWL_SEND_MAX_ALLOCS: usize = 28;
const VIRTIO_WL_CMD_VFD_NEW: u32 = 256;
const VIRTIO_WL_CMD_VFD_CLOSE: u32 = 257;
const VIRTIO_WL_CMD_VFD_SEND: u32 = 258;
const VIRTIO_WL_CMD_VFD_RECV: u32 = 259;
const VIRTIO_WL_CMD_VFD_NEW_CTX: u32 = 260;
const VIRTIO_WL_CMD_VFD_NEW_PIPE: u32 = 261;
const VIRTIO_WL_CMD_VFD_HUP: u32 = 262;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_NEW_DMABUF: u32 = 263;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_CMD_VFD_DMABUF_SYNC: u32 = 264;
#[cfg(feature = "gpu")]
const VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID: u32 = 265;
const VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED: u32 = 266;
const VIRTIO_WL_RESP_OK: u32 = 4096;
const VIRTIO_WL_RESP_VFD_NEW: u32 = 4097;
#[cfg(feature = "minigbm")]
const VIRTIO_WL_RESP_VFD_NEW_DMABUF: u32 = 4098;
const VIRTIO_WL_RESP_ERR: u32 = 4352;
const VIRTIO_WL_RESP_OUT_OF_MEMORY: u32 = 4353;
const VIRTIO_WL_RESP_INVALID_ID: u32 = 4354;
const VIRTIO_WL_RESP_INVALID_TYPE: u32 = 4355;
const VIRTIO_WL_RESP_INVALID_FLAGS: u32 = 4356;
const VIRTIO_WL_RESP_INVALID_CMD: u32 = 4357;
const VIRTIO_WL_VFD_WRITE: u32 = 0x1;
const VIRTIO_WL_VFD_READ: u32 = 0x2;
const VIRTIO_WL_VFD_MAP: u32 = 0x2;
const VIRTIO_WL_VFD_CONTROL: u32 = 0x4;
const VIRTIO_WL_VFD_FENCE: u32 = 0x8;
pub const VIRTIO_WL_F_TRANS_FLAGS: u32 = 0x01;
pub const VIRTIO_WL_F_SEND_FENCES: u32 = 0x02;

pub const QUEUE_SIZE: u16 = 256;
pub const QUEUE_SIZES: &[u16] = &[QUEUE_SIZE, QUEUE_SIZE];

const NEXT_VFD_ID_BASE: u32 = 0x40000000;
const VFD_ID_HOST_MASK: u32 = NEXT_VFD_ID_BASE;
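// Vfd ids with the `VFD_ID_HOST_MASK` bit set are reserved for vfds created by the host, which are
// numbered starting at `NEXT_VFD_ID_BASE`; guest commands naming such an id are rejected with
// `VIRTIO_WL_RESP_INVALID_ID`.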
// Each in-vq buffer is one page, so we need to leave space for the control header and the maximum
// number of allocs.
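// Each `Le32` id is 4 bytes and, with its 8-byte `CtrlHeader`, `CtrlVfdRecv` is 16 bytes, so this
// works out to 0x1000 - 16 - 28 * 4 = 3968 bytes of payload per buffer.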
const IN_BUFFER_LEN: usize =
    0x1000 - size_of::<CtrlVfdRecv>() - VIRTWL_SEND_MAX_ALLOCS * size_of::<Le32>();

#[cfg(feature = "minigbm")]
const VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK: u32 = 0x7;

#[cfg(feature = "minigbm")]
const DMA_BUF_IOCTL_BASE: c_uint = 0x62;

#[cfg(feature = "minigbm")]
#[repr(C)]
#[derive(Copy, Clone)]
struct dma_buf_sync {
    flags: c_ulonglong,
}

#[cfg(feature = "minigbm")]
ioctl_iow_nr!(DMA_BUF_IOCTL_SYNC, DMA_BUF_IOCTL_BASE, 0, dma_buf_sync);

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct sync_file_info {
    name: [u8; 32],
    status: i32,
    flags: u32,
    num_fences: u32,
    pad: u32,
    sync_fence_info: u64,
}

ioctl_iowr_nr!(SYNC_IOC_FILE_INFO, 0x3e, 4, sync_file_info);

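// Checks whether `f` is a sync_file (i.e. an exported fence) by probing it with the
// SYNC_IOC_FILE_INFO ioctl, which only succeeds on sync_file descriptors.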
fn is_fence(f: &File) -> bool {
    let info = sync_file_info::default();
    // Safe as f is a valid file
    unsafe { ioctl_with_ref(f, SYNC_IOC_FILE_INFO(), &info) == 0 }
}

const VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL: u32 = 0;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU: u32 = 1;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE: u32 = 2;
const VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE: u32 = 3;

fn encode_vfd_new(
    writer: &mut Writer,
    resp: bool,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
) -> WlResult<()> {
    let ctrl_vfd_new = CtrlVfdNew {
        hdr: CtrlHeader {
            type_: Le32::from(if resp {
                VIRTIO_WL_RESP_VFD_NEW
            } else {
                VIRTIO_WL_CMD_VFD_NEW
            }),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
    };

    writer
        .write_obj(ctrl_vfd_new)
        .map_err(WlError::WriteResponse)
}

#[cfg(feature = "minigbm")]
fn encode_vfd_new_dmabuf(
    writer: &mut Writer,
    vfd_id: u32,
    flags: u32,
    pfn: u64,
    size: u32,
    desc: GpuMemoryDesc,
) -> WlResult<()> {
    let ctrl_vfd_new_dmabuf = CtrlVfdNewDmabuf {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_RESP_VFD_NEW_DMABUF),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        flags: Le32::from(flags),
        pfn: Le64::from(pfn),
        size: Le32::from(size),
        width: Le32::from(0),
        height: Le32::from(0),
        format: Le32::from(0),
        stride0: Le32::from(desc.planes[0].stride),
        stride1: Le32::from(desc.planes[1].stride),
        stride2: Le32::from(desc.planes[2].stride),
        offset0: Le32::from(desc.planes[0].offset),
        offset1: Le32::from(desc.planes[1].offset),
        offset2: Le32::from(desc.planes[2].offset),
    };

    writer
        .write_obj(ctrl_vfd_new_dmabuf)
        .map_err(WlError::WriteResponse)
}

fn encode_vfd_recv(writer: &mut Writer, vfd_id: u32, data: &[u8], vfd_ids: &[u32]) -> WlResult<()> {
    let ctrl_vfd_recv = CtrlVfdRecv {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_RECV),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
        vfd_count: Le32::from(vfd_ids.len() as u32),
    };
    writer
        .write_obj(ctrl_vfd_recv)
        .map_err(WlError::WriteResponse)?;

    for &recv_vfd_id in vfd_ids.iter() {
        writer
            .write_obj(Le32::from(recv_vfd_id))
            .map_err(WlError::WriteResponse)?;
    }

    writer.write_all(data).map_err(WlError::WriteResponse)
}

fn encode_vfd_hup(writer: &mut Writer, vfd_id: u32) -> WlResult<()> {
    let ctrl_vfd_new = CtrlVfd {
        hdr: CtrlHeader {
            type_: Le32::from(VIRTIO_WL_CMD_VFD_HUP),
            flags: Le32::from(0),
        },
        id: Le32::from(vfd_id),
    };

    writer
        .write_obj(ctrl_vfd_new)
        .map_err(WlError::WriteResponse)
}

fn encode_resp(writer: &mut Writer, resp: WlResp) -> WlResult<()> {
    match resp {
        WlResp::VfdNew {
            id,
            flags,
            pfn,
            size,
            resp,
        } => encode_vfd_new(writer, resp, id, flags, pfn, size),
        #[cfg(feature = "minigbm")]
        WlResp::VfdNewDmabuf {
            id,
            flags,
            pfn,
            size,
            desc,
        } => encode_vfd_new_dmabuf(writer, id, flags, pfn, size, desc),
        WlResp::VfdRecv { id, data, vfds } => encode_vfd_recv(writer, id, data, vfds),
        WlResp::VfdHup { id } => encode_vfd_hup(writer, id),
        r => writer
            .write_obj(Le32::from(r.get_code()))
            .map_err(WlError::WriteResponse),
    }
}

#[allow(dead_code)]
#[sorted]
#[derive(ThisError, Debug)]
enum WlError {
    #[error("overflow in calculation")]
    CheckedOffset,
    #[error("failed to synchronize DMABuf access: {0}")]
    DmabufSync(io::Error),
    #[error("failed to create shared memory from descriptor: {0}")]
    FromSharedMemory(Error),
    #[error("access violation in guest memory: {0}")]
    GuestMemory(#[from] GuestMemoryError),
    #[error("invalid string: {0}")]
    InvalidString(std::str::Utf8Error),
    #[error("failed to create shared memory allocation: {0}")]
    NewAlloc(Error),
    #[error("failed to create pipe: {0}")]
    NewPipe(Error),
    #[error("error parsing descriptor: {0}")]
    ParseDesc(io::Error),
    #[error("failed to read a pipe: {0}")]
    ReadPipe(io::Error),
    #[error("failed to recv on a socket: {0}")]
    RecvVfd(Error),
    #[error("failed to send on a socket: {0}")]
    SendVfd(Error),
    #[error("failed to connect socket: {0}")]
    SocketConnect(io::Error),
    #[error("failed to set socket as non-blocking: {0}")]
    SocketNonBlock(io::Error),
    #[error("unknown socket name: {0}")]
    UnknownSocketName(String),
    #[error("invalid response from parent VM")]
    VmBadResponse,
    #[error("failed to control parent VM: {0}")]
    VmControl(TubeError),
318 #[error("access violating in guest volatile memory: {0}")]
    VolatileMemory(#[from] VolatileMemoryError),
    #[error("failed to listen to descriptor on wait context: {0}")]
    WaitContextAdd(Error),
    #[error("failed to write to a pipe: {0}")]
    WritePipe(io::Error),
    #[error("failed to write response: {0}")]
    WriteResponse(io::Error),
}

type WlResult<T> = result::Result<T, WlError>;

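// Shared handle for memory-related requests to the parent VM. It is cloned into every
// shared-memory-backed `WlVfd` so that each one can unregister its hypervisor memory slot when it
// is closed.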
#[derive(Clone)]
struct VmRequester {
    inner: Rc<Tube>,
}

impl VmRequester {
    fn new(vm_socket: Tube) -> VmRequester {
        VmRequester {
            inner: Rc::new(vm_socket),
        }
    }

    fn request(&self, request: &VmMemoryRequest) -> WlResult<VmMemoryResponse> {
        self.inner.send(&request).map_err(WlError::VmControl)?;
        self.inner.recv().map_err(WlError::VmControl)
    }

    fn register_memory(&self, shm: SharedMemory) -> WlResult<(SharedMemory, VmMemoryResponse)> {
        let request = VmMemoryRequest::RegisterMemory {
            source: VmMemorySource::SharedMemory(shm),
            dest: VmMemoryDestination::NewAllocation,
            read_only: false,
        };
        let response = self.request(&request)?;
        match request {
            VmMemoryRequest::RegisterMemory {
                source: VmMemorySource::SharedMemory(shm),
                ..
            } => Ok((shm, response)),
            _ => unreachable!(),
        }
    }
}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlHeader {
    type_: Le32,
    flags: Le32,
}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdNew {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
}

unsafe impl DataInit for CtrlVfdNew {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdNewCtxNamed {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32, // Ignored.
    pfn: Le64,   // Ignored.
    size: Le32,  // Ignored.
    name: [u8; 32],
}

unsafe impl DataInit for CtrlVfdNewCtxNamed {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
#[cfg(feature = "minigbm")]
struct CtrlVfdNewDmabuf {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
    pfn: Le64,
    size: Le32,
    width: Le32,
    height: Le32,
    format: Le32,
    stride0: Le32,
    stride1: Le32,
    stride2: Le32,
    offset0: Le32,
    offset1: Le32,
    offset2: Le32,
}

#[cfg(feature = "minigbm")]
unsafe impl DataInit for CtrlVfdNewDmabuf {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
#[cfg(feature = "minigbm")]
struct CtrlVfdDmabufSync {
    hdr: CtrlHeader,
    id: Le32,
    flags: Le32,
}

#[cfg(feature = "minigbm")]
unsafe impl DataInit for CtrlVfdDmabufSync {}

#[repr(C)]
#[derive(Copy, Clone)]
struct CtrlVfdRecv {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
}

unsafe impl DataInit for CtrlVfdRecv {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfd {
    hdr: CtrlHeader,
    id: Le32,
}

unsafe impl DataInit for CtrlVfd {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdSend {
    hdr: CtrlHeader,
    id: Le32,
    vfd_count: Le32,
    // Remainder is an array of vfd_count IDs followed by data.
}

unsafe impl DataInit for CtrlVfdSend {}

#[repr(C)]
#[derive(Copy, Clone, Default)]
struct CtrlVfdSendVfd {
    kind: Le32,
    id: Le32,
}

unsafe impl DataInit for CtrlVfdSendVfd {}

#[repr(C)]
#[derive(Copy, Clone)]
union CtrlVfdSendVfdV2Payload {
    id: Le32,
    seqno: Le64,
}

unsafe impl DataInit for CtrlVfdSendVfdV2Payload {}

#[repr(C)]
#[derive(Copy, Clone)]
struct CtrlVfdSendVfdV2 {
    kind: Le32,
    payload: CtrlVfdSendVfdV2Payload,
}

unsafe impl DataInit for CtrlVfdSendVfdV2 {}

impl CtrlVfdSendVfdV2 {
    fn id(&self) -> Le32 {
        assert!(
            self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL
                || self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
        );
        unsafe { self.payload.id }
    }
    #[cfg(feature = "gpu")]
    fn seqno(&self) -> Le64 {
        assert!(self.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE);
        unsafe { self.payload.seqno }
    }
}

#[derive(Debug)]
#[allow(dead_code)]
enum WlResp<'a> {
    Ok,
    VfdNew {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        // The VfdNew variant can be either a response or a command depending on this `resp`. This
        // is important for the `get_code` method.
        resp: bool,
    },
    #[cfg(feature = "minigbm")]
    VfdNewDmabuf {
        id: u32,
        flags: u32,
        pfn: u64,
        size: u32,
        desc: GpuMemoryDesc,
    },
    VfdRecv {
        id: u32,
        data: &'a [u8],
        vfds: &'a [u32],
    },
    VfdHup {
        id: u32,
    },
    Err(Box<dyn StdError>),
    OutOfMemory,
    InvalidId,
    InvalidType,
    InvalidFlags,
    InvalidCommand,
}

impl<'a> WlResp<'a> {
    fn get_code(&self) -> u32 {
        match *self {
            WlResp::Ok => VIRTIO_WL_RESP_OK,
            WlResp::VfdNew { resp, .. } => {
                if resp {
                    VIRTIO_WL_RESP_VFD_NEW
                } else {
                    VIRTIO_WL_CMD_VFD_NEW
                }
            }
            #[cfg(feature = "minigbm")]
            WlResp::VfdNewDmabuf { .. } => VIRTIO_WL_RESP_VFD_NEW_DMABUF,
            WlResp::VfdRecv { .. } => VIRTIO_WL_CMD_VFD_RECV,
            WlResp::VfdHup { .. } => VIRTIO_WL_CMD_VFD_HUP,
            WlResp::Err(_) => VIRTIO_WL_RESP_ERR,
            WlResp::OutOfMemory => VIRTIO_WL_RESP_OUT_OF_MEMORY,
            WlResp::InvalidId => VIRTIO_WL_RESP_INVALID_ID,
            WlResp::InvalidType => VIRTIO_WL_RESP_INVALID_TYPE,
            WlResp::InvalidFlags => VIRTIO_WL_RESP_INVALID_FLAGS,
            WlResp::InvalidCommand => VIRTIO_WL_RESP_INVALID_CMD,
        }
    }
}

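// One virtual file descriptor, backed by at most one of: a unix socket to the wayland server,
// shared memory mapped into the guest (with its hypervisor slot and pfn), one or both ends of a
// pipe, or a sync fence file.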
#[derive(Default)]
struct WlVfd {
    socket: Option<UnixStream>,
    guest_shared_memory: Option<SharedMemory>,
    remote_pipe: Option<File>,
    local_pipe: Option<(u32 /* flags */, File)>,
    slot: Option<(MemSlot, u64 /* pfn */, VmRequester)>,
    #[cfg(feature = "minigbm")]
    is_dmabuf: bool,
    fence: Option<File>,
    is_fence: bool,
}

impl fmt::Debug for WlVfd {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "WlVfd {{")?;
        if let Some(s) = &self.socket {
            write!(f, " socket: {}", s.as_raw_descriptor())?;
        }
        if let Some((slot, pfn, _)) = &self.slot {
            write!(f, " slot: {} pfn: {}", slot, pfn)?;
        }
        if let Some(s) = &self.remote_pipe {
            write!(f, " remote: {}", s.as_raw_descriptor())?;
        }
        if let Some((_, s)) = &self.local_pipe {
            write!(f, " local: {}", s.as_raw_descriptor())?;
        }
        write!(f, " }}")
    }
}

impl WlVfd {
    fn connect<P: AsRef<Path>>(path: P) -> WlResult<WlVfd> {
        let socket = UnixStream::connect(path).map_err(WlError::SocketConnect)?;
        let mut vfd = WlVfd::default();
        vfd.socket = Some(socket);
        Ok(vfd)
    }

    fn allocate(vm: VmRequester, size: u64) -> WlResult<WlVfd> {
        let size_page_aligned = round_up_to_page_size(size as usize) as u64;
        let vfd_shm =
            SharedMemory::named("virtwl_alloc", size_page_aligned).map_err(WlError::NewAlloc)?;

        let (vfd_shm, register_response) = vm.register_memory(vfd_shm)?;

        match register_response {
            VmMemoryResponse::RegisterMemory { pfn, slot } => {
                let mut vfd = WlVfd::default();
                vfd.guest_shared_memory = Some(vfd_shm);
                vfd.slot = Some((slot, pfn, vm));
                Ok(vfd)
            }
            _ => Err(WlError::VmBadResponse),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf(
        vm: VmRequester,
        width: u32,
        height: u32,
        format: u32,
    ) -> WlResult<(WlVfd, GpuMemoryDesc)> {
        let allocate_and_register_gpu_memory_response =
            vm.request(&VmMemoryRequest::AllocateAndRegisterGpuMemory {
                width,
                height,
                format,
                dest: VmMemoryDestination::NewAllocation,
            })?;
        match allocate_and_register_gpu_memory_response {
            VmMemoryResponse::AllocateAndRegisterGpuMemory {
                descriptor,
                pfn,
                slot,
                desc,
            } => {
                let mut vfd = WlVfd::default();
                let vfd_shm =
                    SharedMemory::from_safe_descriptor(descriptor).map_err(WlError::NewAlloc)?;
                vfd.guest_shared_memory = Some(vfd_shm);
                vfd.slot = Some((slot, pfn, vm));
                vfd.is_dmabuf = true;
                Ok((vfd, desc))
            }
            _ => Err(WlError::VmBadResponse),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&self, flags: u32) -> WlResult<()> {
        if !self.is_dmabuf {
            return Err(WlError::DmabufSync(io::Error::from_raw_os_error(EINVAL)));
        }

        match &self.guest_shared_memory {
            Some(descriptor) => {
                let sync = dma_buf_sync {
                    flags: flags as u64,
                };
                // Safe as descriptor is a valid dmabuf and incorrect flags will return an error.
                if unsafe { ioctl_with_ref(descriptor, DMA_BUF_IOCTL_SYNC(), &sync) } < 0 {
                    Err(WlError::DmabufSync(io::Error::last_os_error()))
                } else {
                    Ok(())
                }
            }
            None => Err(WlError::DmabufSync(io::Error::from_raw_os_error(EBADF))),
        }
    }

    fn pipe_remote_read_local_write() -> WlResult<WlVfd> {
        let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
        let mut vfd = WlVfd::default();
        vfd.remote_pipe = Some(read_pipe);
        vfd.local_pipe = Some((VIRTIO_WL_VFD_WRITE, write_pipe));
        Ok(vfd)
    }

    fn pipe_remote_write_local_read() -> WlResult<WlVfd> {
        let (read_pipe, write_pipe) = pipe(true).map_err(WlError::NewPipe)?;
        let mut vfd = WlVfd::default();
        vfd.remote_pipe = Some(write_pipe);
        vfd.local_pipe = Some((VIRTIO_WL_VFD_READ, read_pipe));
        Ok(vfd)
    }

    fn from_file(vm: VmRequester, mut descriptor: File) -> WlResult<WlVfd> {
        // We need to determine if the given file is more like shared memory or a pipe/socket. A
        // quick and easy check is to seek to the end of the file. If it works we assume it's not a
        // pipe/socket because those have no end. We can even use that seek location as an indicator
        // for how big the shared memory chunk to map into guest memory is. If seeking to the end
        // fails, we assume it's a socket or pipe with read/write semantics.
        if descriptor.seek(SeekFrom::End(0)).is_ok() {
            let shm = SharedMemory::from_file(descriptor).map_err(WlError::FromSharedMemory)?;
            let (shm, register_response) = vm.register_memory(shm)?;

            match register_response {
                VmMemoryResponse::RegisterMemory { pfn, slot } => {
                    let mut vfd = WlVfd::default();
                    vfd.guest_shared_memory = Some(shm);
                    vfd.slot = Some((slot, pfn, vm));
                    Ok(vfd)
                }
                _ => Err(WlError::VmBadResponse),
            }
        } else if is_fence(&descriptor) {
            let mut vfd = WlVfd::default();
            vfd.is_fence = true;
            vfd.fence = Some(descriptor);
            Ok(vfd)
        } else {
            let flags = match FileFlags::from_file(&descriptor) {
                Ok(FileFlags::Read) => VIRTIO_WL_VFD_READ,
                Ok(FileFlags::Write) => VIRTIO_WL_VFD_WRITE,
                Ok(FileFlags::ReadWrite) => VIRTIO_WL_VFD_READ | VIRTIO_WL_VFD_WRITE,
                _ => 0,
            };
            let mut vfd = WlVfd::default();
            vfd.local_pipe = Some((flags, descriptor));
            Ok(vfd)
        }
    }

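    // Reports the `VIRTIO_WL_VFD_*` flags the guest should see for this vfd. When
    // VIRTIO_WL_F_TRANS_FLAGS has been negotiated, the flags describe read/write/fence capability;
    // otherwise the legacy meanings (control socket, writable mappable memory) apply.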
    fn flags(&self, use_transition_flags: bool) -> u32 {
        let mut flags = 0;
        if use_transition_flags {
            if self.socket.is_some() {
                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ;
            }
            if let Some((f, _)) = self.local_pipe {
                flags |= f;
            }
            if self.is_fence {
                flags |= VIRTIO_WL_VFD_FENCE;
            }
        } else {
            if self.socket.is_some() {
                flags |= VIRTIO_WL_VFD_CONTROL;
            }
            if self.slot.is_some() {
                flags |= VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP
            }
        }
        flags
    }

    // Page frame number in the guest this VFD was mapped at.
    fn pfn(&self) -> Option<u64> {
        self.slot.as_ref().map(|s| s.1)
    }

    // Size in bytes of the shared memory VFD.
    fn size(&self) -> Option<u64> {
        self.guest_shared_memory.as_ref().map(|shm| shm.size())
    }

    // The FD that gets sent if this VFD is sent over a socket.
    fn send_descriptor(&self) -> Option<RawDescriptor> {
        self.guest_shared_memory
            .as_ref()
            .map(|shm| shm.as_raw_descriptor())
            .or(self.socket.as_ref().map(|s| s.as_raw_descriptor()))
            .or(self.remote_pipe.as_ref().map(|p| p.as_raw_descriptor()))
            .or(self.fence.as_ref().map(|f| f.as_raw_descriptor()))
    }

    // The FD that is used for polling for events on this VFD.
    fn wait_descriptor(&self) -> Option<&dyn AsRawDescriptor> {
        self.socket
            .as_ref()
            .map(|s| s as &dyn AsRawDescriptor)
            .or_else(|| {
                self.local_pipe
                    .as_ref()
                    .map(|(_, p)| p as &dyn AsRawDescriptor)
            })
            .or_else(|| self.fence.as_ref().map(|f| f as &dyn AsRawDescriptor))
    }

    // Sends data/files from the guest to the host over this VFD.
    fn send(&mut self, rds: &[RawDescriptor], data: &mut Reader) -> WlResult<WlResp> {
        if let Some(socket) = &self.socket {
            socket
                .send_with_fds(&data.get_remaining(), rds)
                .map_err(WlError::SendVfd)?;
            // All remaining data in `data` is now considered consumed.
            data.consume(::std::usize::MAX);
            Ok(WlResp::Ok)
        } else if let Some((_, local_pipe)) = &mut self.local_pipe {
            // Impossible to send descriptors over a simple pipe.
            if !rds.is_empty() {
                return Ok(WlResp::InvalidType);
            }
            data.read_to(local_pipe, usize::max_value())
                .map_err(WlError::WritePipe)?;
            Ok(WlResp::Ok)
        } else {
            Ok(WlResp::InvalidType)
        }
    }

    // Receives data/files from the host for this VFD and queues it for the guest.
    fn recv(&mut self, in_file_queue: &mut Vec<File>) -> WlResult<Vec<u8>> {
        if let Some(socket) = self.socket.take() {
            let mut buf = vec![0; IN_BUFFER_LEN];
            let mut fd_buf = [0; VIRTWL_SEND_MAX_ALLOCS];
            // If any errors happen, the socket will get dropped, preventing more reading.
            let (len, file_count) = socket
                .recv_with_fds(IoSliceMut::new(&mut buf), &mut fd_buf)
                .map_err(WlError::RecvVfd)?;
            // If any data gets read, put the socket back for future recv operations.
            if len != 0 || file_count != 0 {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.socket = Some(socket);
                // Safe because the first file_count fds from recv_with_fds are owned by us and
                // valid.
                in_file_queue.extend(
                    fd_buf[..file_count]
                        .iter()
                        .map(|&descriptor| unsafe { File::from_raw_descriptor(descriptor) }),
                );
                return Ok(buf);
            }
            Ok(Vec::new())
        } else if let Some((flags, mut local_pipe)) = self.local_pipe.take() {
            let mut buf = Vec::new();
            buf.resize(IN_BUFFER_LEN, 0);
            let len = local_pipe.read(&mut buf[..]).map_err(WlError::ReadPipe)?;
            if len != 0 {
                buf.truncate(len);
                buf.shrink_to_fit();
                self.local_pipe = Some((flags, local_pipe));
                return Ok(buf);
            }
            Ok(Vec::new())
        } else {
            Ok(Vec::new())
        }
    }

    // Called after this VFD is sent over a socket to ensure the local end of the VFD receives hang
    // up events.
    fn close_remote(&mut self) {
        self.remote_pipe = None;
    }

    fn close(&mut self) -> WlResult<()> {
        if let Some((slot, _, vm)) = self.slot.take() {
            vm.request(&VmMemoryRequest::UnregisterMemory(slot))?;
        }
        self.socket = None;
        self.remote_pipe = None;
        self.local_pipe = None;
        Ok(())
    }
}

impl Drop for WlVfd {
    fn drop(&mut self) {
        let _ = self.close();
    }
}

#[derive(Debug)]
enum WlRecv {
    Vfd { id: u32 },
    Data { buf: Vec<u8> },
    Hup,
}

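/// Device-wide state for virtio-wl: the table of live vfds, messages buffered for the guest
/// (`in_queue`), and the bookkeeping used to batch newly received vfd announcements with the data
/// that references them.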
pub struct WlState {
    wayland_paths: Map<String, PathBuf>,
    vm: VmRequester,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    wait_ctx: WaitContext<u32>,
    vfds: Map<u32, WlVfd>,
    next_vfd_id: u32,
    in_file_queue: Vec<File>,
    in_queue: VecDeque<(u32 /* vfd_id */, WlRecv)>,
    current_recv_vfd: Option<u32>,
    recv_vfds: Vec<u32>,
    #[cfg(feature = "gpu")]
    signaled_fence: Option<SafeDescriptor>,
    use_send_vfd_v2: bool,
}

impl WlState {
    /// Create a new `WlState` instance for running a virtio-wl device.
    pub fn new(
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
    ) -> WlState {
        WlState {
            wayland_paths,
            vm: VmRequester::new(vm_tube),
            resource_bridge,
            wait_ctx: WaitContext::new().expect("failed to create WaitContext"),
            use_transition_flags,
            vfds: Map::new(),
            next_vfd_id: NEXT_VFD_ID_BASE,
            in_file_queue: Vec::new(),
            in_queue: VecDeque::new(),
            current_recv_vfd: None,
            recv_vfds: Vec::new(),
            #[cfg(feature = "gpu")]
            signaled_fence: None,
            use_send_vfd_v2,
        }
    }

    /// This is a hack so that we can drive the inner WaitContext from an async fn. The proper
    /// long-term solution is to replace the WaitContext completely by spawning async workers
    /// instead.
    pub fn wait_ctx(&self) -> &WaitContext<u32> {
        &self.wait_ctx
    }

    fn new_pipe(&mut self, id: u32, flags: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ) != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        if flags & VIRTIO_WL_VFD_WRITE != 0 && flags & VIRTIO_WL_VFD_READ != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = if flags & VIRTIO_WL_VFD_WRITE != 0 {
                    WlVfd::pipe_remote_read_local_write()?
                } else if flags & VIRTIO_WL_VFD_READ != 0 {
                    WlVfd::pipe_remote_write_local_read()?
                } else {
                    return Ok(WlResp::InvalidFlags);
                };
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                let resp = WlResp::VfdNew {
                    id,
                    flags: 0,
                    pfn: 0,
                    size: 0,
                    resp: true,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    fn new_alloc(&mut self, id: u32, flags: u32, size: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        if self.use_transition_flags {
            if flags != 0 {
                return Ok(WlResp::InvalidFlags);
            }
        } else if flags & !(VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_MAP) != 0 {
            return Ok(WlResp::Err(Box::from("invalid flags")));
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = WlVfd::allocate(self.vm.clone(), size as u64)?;
                let resp = WlResp::VfdNew {
                    id,
                    flags,
                    pfn: vfd.pfn().unwrap_or_default(),
                    size: vfd.size().unwrap_or_default() as u32,
                    resp: true,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "minigbm")]
    fn new_dmabuf(&mut self, id: u32, width: u32, height: u32, format: u32) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let (vfd, desc) = WlVfd::dmabuf(self.vm.clone(), width, height, format)?;
                let resp = WlResp::VfdNewDmabuf {
                    id,
                    flags: 0,
                    pfn: vfd.pfn().unwrap_or_default(),
                    size: vfd.size().unwrap_or_default() as u32,
                    desc,
                };
                entry.insert(vfd);
                Ok(resp)
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "minigbm")]
    fn dmabuf_sync(&mut self, vfd_id: u32, flags: u32) -> WlResult<WlResp> {
        if flags & !(VIRTIO_WL_VFD_DMABUF_SYNC_VALID_FLAG_MASK) != 0 {
            return Ok(WlResp::InvalidFlags);
        }

        match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => {
                vfd.dmabuf_sync(flags)?;
                Ok(WlResp::Ok)
            }
            None => Ok(WlResp::InvalidId),
        }
    }

    fn new_context(&mut self, id: u32, name: &str) -> WlResult<WlResp> {
        if id & VFD_ID_HOST_MASK != 0 {
            return Ok(WlResp::InvalidId);
        }

        let flags = if self.use_transition_flags {
            VIRTIO_WL_VFD_WRITE | VIRTIO_WL_VFD_READ
        } else {
            VIRTIO_WL_VFD_CONTROL
        };

        match self.vfds.entry(id) {
            Entry::Vacant(entry) => {
                let vfd = entry.insert(WlVfd::connect(
                    &self
                        .wayland_paths
                        .get(name)
                        .ok_or_else(|| WlError::UnknownSocketName(name.to_string()))?,
                )?);
                self.wait_ctx
                    .add(vfd.wait_descriptor().unwrap(), id)
                    .map_err(WlError::WaitContextAdd)?;
                Ok(WlResp::VfdNew {
                    id,
                    flags,
                    pfn: 0,
                    size: 0,
                    resp: true,
                })
            }
            Entry::Occupied(_) => Ok(WlResp::InvalidId),
        }
    }

    fn process_wait_context(&mut self) {
        let events = match self.wait_ctx.wait_timeout(Duration::from_secs(0)) {
            Ok(v) => v,
            Err(e) => {
1075 error!("failed polling for vfd evens: {}", e);
                return;
            }
        };

        for event in events.iter().filter(|e| e.is_readable) {
            if let Err(e) = self.recv(event.token) {
                error!("failed to recv from vfd: {}", e)
            }
        }

        for event in events.iter().filter(|e| e.is_hungup) {
            if !event.is_readable {
                let vfd_id = event.token;
                if let Some(descriptor) =
                    self.vfds.get(&vfd_id).and_then(|vfd| vfd.wait_descriptor())
                {
                    if let Err(e) = self.wait_ctx.delete(descriptor) {
                        warn!("failed to remove hungup vfd from poll context: {}", e);
                    }
                }
                self.in_queue.push_back((vfd_id, WlRecv::Hup));
            }
        }
    }

    fn close(&mut self, vfd_id: u32) -> WlResult<WlResp> {
        let mut to_delete = Set::new();
        for (dest_vfd_id, q) in &self.in_queue {
            if *dest_vfd_id == vfd_id {
                if let WlRecv::Vfd { id } = q {
                    to_delete.insert(*id);
                }
            }
        }
        for vfd_id in to_delete {
            // Sorry sub-error, we can't have cascading errors leaving us in an inconsistent state.
            let _ = self.close(vfd_id);
        }
        match self.vfds.remove(&vfd_id) {
            Some(mut vfd) => {
                self.in_queue.retain(|&(id, _)| id != vfd_id);
                vfd.close()?;
                Ok(WlResp::Ok)
            }
            None => Ok(WlResp::InvalidId),
        }
    }

    #[cfg(feature = "gpu")]
    fn get_info(&mut self, request: ResourceRequest) -> Option<File> {
        let sock = self.resource_bridge.as_ref().unwrap();
        match get_resource_info(sock, request) {
            Ok(ResourceInfo::Buffer(BufferInfo { file, .. })) => Some(file),
            Ok(ResourceInfo::Fence { file }) => Some(file),
            Err(ResourceBridgeError::InvalidResource(req)) => {
                warn!("attempt to send non-existent gpu resource {}", req);
                None
            }
            Err(e) => {
                error!("{}", e);
                // If there was an error with the resource bridge, it can no longer be
                // trusted to continue to function.
                self.resource_bridge = None;
                None
            }
        }
    }

    fn send(
        &mut self,
        vfd_id: u32,
        vfd_count: usize,
        foreign_id: bool,
        reader: &mut Reader,
    ) -> WlResult<WlResp> {
        // First stage gathers and normalizes all id information from guest memory.
        let mut send_vfd_ids = [CtrlVfdSendVfdV2 {
            kind: Le32::from(0),
            payload: CtrlVfdSendVfdV2Payload { id: Le32::from(0) },
        }; VIRTWL_SEND_MAX_ALLOCS];
        for vfd_id in send_vfd_ids.iter_mut().take(vfd_count) {
            *vfd_id = if foreign_id {
                if self.use_send_vfd_v2 {
                    reader.read_obj().map_err(WlError::ParseDesc)?
                } else {
                    let vfd: CtrlVfdSendVfd = reader.read_obj().map_err(WlError::ParseDesc)?;
                    CtrlVfdSendVfdV2 {
                        kind: vfd.kind,
                        payload: CtrlVfdSendVfdV2Payload { id: vfd.id },
                    }
                }
            } else {
                CtrlVfdSendVfdV2 {
                    kind: Le32::from(VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL),
                    payload: CtrlVfdSendVfdV2Payload {
                        id: reader.read_obj().map_err(WlError::ParseDesc)?,
                    },
                }
            };
        }

        // Next stage collects corresponding file descriptors for each id.
        let mut rds = [0; VIRTWL_SEND_MAX_ALLOCS];
        #[cfg(feature = "gpu")]
        let mut bridged_files = Vec::new();
        for (&send_vfd_id, descriptor) in send_vfd_ids[..vfd_count].iter().zip(rds.iter_mut()) {
            match send_vfd_id.kind.to_native() {
                VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL => {
                    match self.vfds.get(&send_vfd_id.id().to_native()) {
                        Some(vfd) => match vfd.send_descriptor() {
                            Some(vfd_fd) => *descriptor = vfd_fd,
                            None => return Ok(WlResp::InvalidType),
                        },
                        None => {
                            warn!(
1191 "attempt to send non-existant vfd 0x{:08x}",
                                send_vfd_id.id().to_native()
                            );
                            return Ok(WlResp::InvalidId);
                        }
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetBuffer {
                        id: send_vfd_id.id().to_native(),
                    }) {
                        Some(file) => {
                            *descriptor = file.as_raw_descriptor();
                            bridged_files.push(file);
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE if self.resource_bridge.is_some() => {
                    match self.get_info(ResourceRequest::GetFence {
                        seqno: send_vfd_id.seqno().to_native(),
                    }) {
                        Some(file) => {
                            *descriptor = file.as_raw_descriptor();
                            bridged_files.push(file);
                        }
                        None => return Ok(WlResp::InvalidId),
                    }
                }
                #[cfg(feature = "gpu")]
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE
                    if self.resource_bridge.is_some() =>
                {
                    if self.signaled_fence.is_none() {
                        // If the guest is sending a signaled fence, we know a fence
                        // with seqno 0 must already be signaled.
                        match self.get_info(ResourceRequest::GetFence { seqno: 0 }) {
                            Some(file) => {
                                // Safe since get_info returned a valid File.
                                let safe_descriptor = unsafe {
                                    SafeDescriptor::from_raw_descriptor(file.into_raw_descriptor())
                                };
                                self.signaled_fence = Some(safe_descriptor)
                            }
                            None => return Ok(WlResp::InvalidId),
                        }
                    }
                    match self.signaled_fence.as_ref().unwrap().try_clone() {
                        Ok(dup) => {
                            *descriptor = dup.into_raw_descriptor();
                            // Safe because the fd comes from a valid SafeDescriptor.
                            let file = unsafe { File::from_raw_descriptor(*descriptor) };
                            bridged_files.push(file);
                        }
                        Err(_) => return Ok(WlResp::InvalidId),
                    }
                }
                VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_FENCE
                | VIRTIO_WL_CTRL_VFD_SEND_KIND_VIRTGPU_SIGNALED_FENCE => {
                    let _ = self.resource_bridge.as_ref();
                    warn!("attempt to send foreign resource kind but feature is disabled");
                }
                kind => {
                    warn!("attempt to send unknown foreign resource kind: {}", kind);
                    return Ok(WlResp::InvalidId);
                }
            }
        }

        // Final stage sends file descriptors and data to the target vfd's socket.
        match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => match vfd.send(&rds[..vfd_count], reader)? {
                WlResp::Ok => {}
                _ => return Ok(WlResp::InvalidType),
            },
            None => return Ok(WlResp::InvalidId),
        }
        // The vfds with remote FDs need to be closed so that the local side can receive
        // hangup events.
        for &send_vfd_id in &send_vfd_ids[..vfd_count] {
            if send_vfd_id.kind == VIRTIO_WL_CTRL_VFD_SEND_KIND_LOCAL {
                if let Some(vfd) = self.vfds.get_mut(&send_vfd_id.id().into()) {
                    vfd.close_remote();
                }
            }
        }
        Ok(WlResp::Ok)
    }

    fn recv(&mut self, vfd_id: u32) -> WlResult<()> {
        let buf = match self.vfds.get_mut(&vfd_id) {
            Some(vfd) => {
                if vfd.is_fence {
                    if let Err(e) = self.wait_ctx.delete(vfd.wait_descriptor().unwrap()) {
                        warn!("failed to remove hungup vfd from poll context: {}", e);
                    }
                    self.in_queue.push_back((vfd_id, WlRecv::Hup));
                    return Ok(());
                } else {
                    vfd.recv(&mut self.in_file_queue)?
                }
            }
            None => return Ok(()),
        };

        if self.in_file_queue.is_empty() && buf.is_empty() {
            self.in_queue.push_back((vfd_id, WlRecv::Hup));
            return Ok(());
        }
        for file in self.in_file_queue.drain(..) {
            let vfd = WlVfd::from_file(self.vm.clone(), file)?;
            if let Some(wait_descriptor) = vfd.wait_descriptor() {
                self.wait_ctx
                    .add(wait_descriptor, self.next_vfd_id)
                    .map_err(WlError::WaitContextAdd)?;
            }
            self.vfds.insert(self.next_vfd_id, vfd);
            self.in_queue.push_back((
                vfd_id,
                WlRecv::Vfd {
                    id: self.next_vfd_id,
                },
            ));
            self.next_vfd_id += 1;
        }
        self.in_queue.push_back((vfd_id, WlRecv::Data { buf }));

        Ok(())
    }

    fn execute(&mut self, reader: &mut Reader) -> WlResult<WlResp> {
        let type_ = {
            let mut type_reader = reader.clone();
            type_reader.read_obj::<Le32>().map_err(WlError::ParseDesc)?
        };
        match type_.into() {
            VIRTIO_WL_CMD_VFD_NEW => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_alloc(ctrl.id.into(), ctrl.flags.into(), ctrl.size.into())
            }
            VIRTIO_WL_CMD_VFD_CLOSE => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.close(ctrl.id.into())
            }
            VIRTIO_WL_CMD_VFD_SEND => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = false;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            #[cfg(feature = "gpu")]
            VIRTIO_WL_CMD_VFD_SEND_FOREIGN_ID => {
                let ctrl = reader
                    .read_obj::<CtrlVfdSend>()
                    .map_err(WlError::ParseDesc)?;
                let foreign_id = true;
                self.send(
                    ctrl.id.into(),
                    ctrl.vfd_count.to_native() as usize,
                    foreign_id,
                    reader,
                )
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX => {
                let ctrl = reader.read_obj::<CtrlVfd>().map_err(WlError::ParseDesc)?;
                self.new_context(ctrl.id.into(), "")
            }
            VIRTIO_WL_CMD_VFD_NEW_PIPE => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNew>()
                    .map_err(WlError::ParseDesc)?;
                self.new_pipe(ctrl.id.into(), ctrl.flags.into())
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_NEW_DMABUF => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewDmabuf>()
                    .map_err(WlError::ParseDesc)?;
                self.new_dmabuf(
                    ctrl.id.into(),
                    ctrl.width.into(),
                    ctrl.height.into(),
                    ctrl.format.into(),
                )
            }
            #[cfg(feature = "minigbm")]
            VIRTIO_WL_CMD_VFD_DMABUF_SYNC => {
                let ctrl = reader
                    .read_obj::<CtrlVfdDmabufSync>()
                    .map_err(WlError::ParseDesc)?;
                self.dmabuf_sync(ctrl.id.into(), ctrl.flags.into())
            }
            VIRTIO_WL_CMD_VFD_NEW_CTX_NAMED => {
                let ctrl = reader
                    .read_obj::<CtrlVfdNewCtxNamed>()
                    .map_err(WlError::ParseDesc)?;
                let name_len = ctrl
                    .name
                    .iter()
                    .position(|x| x == &0)
                    .unwrap_or(ctrl.name.len());
                let name =
                    std::str::from_utf8(&ctrl.name[..name_len]).map_err(WlError::InvalidString)?;
                self.new_context(ctrl.id.into(), name)
            }
            op_type => {
                warn!("unexpected command {}", op_type);
                Ok(WlResp::InvalidCommand)
            }
        }
    }

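    // Peeks at the next message to encode onto the `in` queue without consuming it. Consecutive
    // `WlRecv::Vfd` entries from the same source vfd are accumulated into `recv_vfds` (see
    // `pop_recv` below) so that a single `CtrlVfdRecv` can announce every new vfd id alongside the
    // data that references them.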
    fn next_recv(&self) -> Option<WlResp> {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        match self.vfds.get(&id) {
                            Some(vfd) => Some(WlResp::VfdNew {
                                id,
                                flags: vfd.flags(self.use_transition_flags),
                                pfn: vfd.pfn().unwrap_or_default(),
                                size: vfd.size().unwrap_or_default() as u32,
                                resp: false,
                            }),
                            _ => Some(WlResp::VfdNew {
                                id,
                                flags: 0,
                                pfn: 0,
                                size: 0,
                                resp: false,
                            }),
                        }
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Data { ref buf }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        Some(WlResp::VfdRecv {
                            id: vfd_id,
                            data: &buf[..],
                            vfds: &self.recv_vfds[..],
                        })
                    } else {
                        Some(WlResp::VfdRecv {
                            id: self.current_recv_vfd.unwrap(),
                            data: &[],
                            vfds: &self.recv_vfds[..],
                        })
                    }
                }
                (vfd_id, WlRecv::Hup) => Some(WlResp::VfdHup { id: vfd_id }),
            }
        } else {
            None
        }
    }

    fn pop_recv(&mut self) {
        if let Some(q) = self.in_queue.front() {
            match *q {
                (vfd_id, WlRecv::Vfd { id }) => {
                    if self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id) {
                        self.recv_vfds.push(id);
                        self.current_recv_vfd = Some(vfd_id);
                    } else {
                        self.recv_vfds.clear();
                        self.current_recv_vfd = None;
                        return;
                    }
                }
                (vfd_id, WlRecv::Data { .. }) => {
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
                    if !(self.current_recv_vfd.is_none() || self.current_recv_vfd == Some(vfd_id)) {
                        return;
                    }
                }
                (_, WlRecv::Hup) => {
                    self.recv_vfds.clear();
                    self.current_recv_vfd = None;
                }
            }
        }
        self.in_queue.pop_front();
    }
}

#[derive(ThisError, Debug)]
#[error("no descriptors available in queue")]
pub struct DescriptorsExhausted;

/// Handle incoming events and forward them to the VM over the input queue.
pub fn process_in_queue<I: SignalableInterrupt>(
    interrupt: &I,
    in_queue: &mut Queue,
    mem: &GuestMemory,
    state: &mut WlState,
) -> ::std::result::Result<(), DescriptorsExhausted> {
    const MIN_IN_DESC_LEN: u32 =
        (size_of::<CtrlVfdRecv>() + size_of::<Le32>() * VIRTWL_SEND_MAX_ALLOCS) as u32;

    state.process_wait_context();

    let mut needs_interrupt = false;
    let mut exhausted_queue = false;
    loop {
        let desc = if let Some(d) = in_queue.peek(mem) {
            d
        } else {
            exhausted_queue = true;
            break;
        };
        if desc.len < MIN_IN_DESC_LEN || desc.is_read_only() {
            needs_interrupt = true;
            in_queue.pop_peeked(mem);
            in_queue.add_used(mem, desc.index, 0);
            continue;
        }

        let index = desc.index;
        let mut should_pop = false;
        if let Some(in_resp) = state.next_recv() {
            let bytes_written = match Writer::new(mem.clone(), desc) {
                Ok(mut writer) => {
                    match encode_resp(&mut writer, in_resp) {
                        Ok(()) => {
                            should_pop = true;
                        }
                        Err(e) => {
                            error!("failed to encode response to descriptor chain: {}", e);
                        }
                    };
                    writer.bytes_written() as u32
                }
                Err(e) => {
                    error!("invalid descriptor: {}", e);
                    0
                }
            };

            needs_interrupt = true;
            in_queue.pop_peeked(mem);
            in_queue.add_used(mem, index, bytes_written);
        } else {
            break;
        }
        if should_pop {
            state.pop_recv();
        }
    }

    if needs_interrupt {
        in_queue.trigger_interrupt(mem, interrupt);
    }

    if exhausted_queue {
        Err(DescriptorsExhausted)
    } else {
        Ok(())
    }
}

/// Handle messages from the output queue and forward them to the display server, if necessary.
pub fn process_out_queue<I: SignalableInterrupt>(
    interrupt: &I,
    out_queue: &mut Queue,
    mem: &GuestMemory,
    state: &mut WlState,
) {
    let mut needs_interrupt = false;
    while let Some(desc) = out_queue.pop(mem) {
        let desc_index = desc.index;
        match (
            Reader::new(mem.clone(), desc.clone()),
            Writer::new(mem.clone(), desc),
        ) {
            (Ok(mut reader), Ok(mut writer)) => {
                let resp = match state.execute(&mut reader) {
                    Ok(r) => r,
                    Err(e) => WlResp::Err(Box::new(e)),
                };

                match encode_resp(&mut writer, resp) {
                    Ok(()) => {}
                    Err(e) => {
                        error!("failed to encode response to descriptor chain: {}", e);
                    }
                }

                out_queue.add_used(mem, desc_index, writer.bytes_written() as u32);
                needs_interrupt = true;
            }
            (_, Err(e)) | (Err(e), _) => {
                error!("invalid descriptor: {}", e);
                out_queue.add_used(mem, desc_index, 0);
                needs_interrupt = true;
            }
        }
    }

    if needs_interrupt {
        out_queue.trigger_interrupt(mem, interrupt);
    }
}

struct Worker {
    interrupt: Interrupt,
    mem: GuestMemory,
    in_queue: Queue,
    out_queue: Queue,
    state: WlState,
}

impl Worker {
    fn new(
        mem: GuestMemory,
        interrupt: Interrupt,
        in_queue: Queue,
        out_queue: Queue,
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        use_transition_flags: bool,
        use_send_vfd_v2: bool,
        resource_bridge: Option<Tube>,
    ) -> Worker {
        Worker {
            interrupt,
            mem,
            in_queue,
            out_queue,
            state: WlState::new(
                wayland_paths,
                vm_tube,
                use_transition_flags,
                use_send_vfd_v2,
                resource_bridge,
            ),
        }
    }

    fn run(&mut self, mut queue_evts: Vec<Event>, kill_evt: Event) {
        let in_queue_evt = queue_evts.remove(0);
        let out_queue_evt = queue_evts.remove(0);
        #[derive(PollToken)]
        enum Token {
            InQueue,
            OutQueue,
            Kill,
            State,
            InterruptResample,
        }

        let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
            (&in_queue_evt, Token::InQueue),
            (&out_queue_evt, Token::OutQueue),
            (&kill_evt, Token::Kill),
            (&self.state.wait_ctx, Token::State),
        ]) {
            Ok(pc) => pc,
            Err(e) => {
                error!("failed creating WaitContext: {}", e);
                return;
            }
        };
        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            if wait_ctx
                .add(resample_evt, Token::InterruptResample)
                .is_err()
            {
                error!("failed adding resample event to WaitContext.");
                return;
            }
        }

        let mut watching_state_ctx = true;
        'wait: loop {
            let events = match wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };

            for event in &events {
                match event.token {
                    Token::InQueue => {
                        let _ = in_queue_evt.read();
                        if !watching_state_ctx {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::Read, Token::State)
                            {
                                error!("Failed to modify wait_ctx descriptor for WlState: {}", e);
                                break;
                            }
                            watching_state_ctx = true;
                        }
                    }
                    Token::OutQueue => {
                        let _ = out_queue_evt.read();
                        process_out_queue(
                            &self.interrupt,
                            &mut self.out_queue,
                            &self.mem,
                            &mut self.state,
                        );
                    }
                    Token::Kill => break 'wait,
                    Token::State => {
                        if let Err(DescriptorsExhausted) = process_in_queue(
                            &self.interrupt,
                            &mut self.in_queue,
                            &self.mem,
                            &mut self.state,
                        ) {
                            if let Err(e) =
                                wait_ctx.modify(&self.state.wait_ctx, EventType::None, Token::State)
                            {
                                error!(
                                    "Failed to stop watching wait_ctx descriptor for WlState: {}",
                                    e
                                );
                                break;
                            }
                            watching_state_ctx = false;
                        }
                    }
                    Token::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                }
            }
        }
    }
}

pub struct Wl {
    kill_evt: Option<Event>,
    worker_thread: Option<thread::JoinHandle<()>>,
    wayland_paths: Map<String, PathBuf>,
    vm_socket: Option<Tube>,
    resource_bridge: Option<Tube>,
    use_transition_flags: bool,
    use_send_vfd_v2: bool,
    base_features: u64,
}

impl Wl {
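    /// Creates a new virtio-wl device from the base virtio features and a map of socket names to
    /// wayland server paths. A minimal construction sketch (the path, tube, and empty default name
    /// here are illustrative assumptions; `vm_tube` must be the memory-control tube to the main VM
    /// process):
    ///
    /// ```ignore
    /// let mut wayland_paths = Map::new();
    /// wayland_paths.insert("".to_string(), PathBuf::from("/run/wayland-0"));
    /// let wl = Wl::new(base_features, wayland_paths, vm_tube, None)?;
    /// ```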
    pub fn new(
        base_features: u64,
        wayland_paths: Map<String, PathBuf>,
        vm_tube: Tube,
        resource_bridge: Option<Tube>,
    ) -> Result<Wl> {
        Ok(Wl {
            kill_evt: None,
            worker_thread: None,
            wayland_paths,
            vm_socket: Some(vm_tube),
            resource_bridge,
            use_transition_flags: false,
            use_send_vfd_v2: false,
            base_features,
        })
    }
}

impl Drop for Wl {
    fn drop(&mut self) {
        if let Some(kill_evt) = self.kill_evt.take() {
            // Ignore the result because there is nothing we can do about it.
            let _ = kill_evt.write(1);
        }

        if let Some(worker_thread) = self.worker_thread.take() {
            let _ = worker_thread.join();
        }
    }
}

impl VirtioDevice for Wl {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        if let Some(vm_socket) = &self.vm_socket {
            keep_rds.push(vm_socket.as_raw_descriptor());
        }
        if let Some(resource_bridge) = &self.resource_bridge {
            keep_rds.push(resource_bridge.as_raw_descriptor());
        }

        keep_rds
    }

    fn device_type(&self) -> u32 {
        TYPE_WL
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.base_features | 1 << VIRTIO_WL_F_TRANS_FLAGS | 1 << VIRTIO_WL_F_SEND_FENCES
    }

    fn ack_features(&mut self, value: u64) {
        if value & (1 << VIRTIO_WL_F_TRANS_FLAGS) != 0 {
            self.use_transition_flags = true;
        }
        if value & (1 << VIRTIO_WL_F_SEND_FENCES) != 0 {
            self.use_send_vfd_v2 = true;
        }
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<Queue>,
        queue_evts: Vec<Event>,
    ) {
        if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
            return;
        }

        let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
            Ok(v) => v,
            Err(e) => {
                error!("failed creating kill Event pair: {}", e);
                return;
            }
        };
        self.kill_evt = Some(self_kill_evt);

        if let Some(vm_socket) = self.vm_socket.take() {
            let wayland_paths = self.wayland_paths.clone();
            let use_transition_flags = self.use_transition_flags;
            let use_send_vfd_v2 = self.use_send_vfd_v2;
            let resource_bridge = self.resource_bridge.take();
            let worker_result =
                thread::Builder::new()
                    .name("virtio_wl".to_string())
                    .spawn(move || {
                        Worker::new(
                            mem,
                            interrupt,
                            queues.remove(0),
                            queues.remove(0),
                            wayland_paths,
                            vm_socket,
                            use_transition_flags,
                            use_send_vfd_v2,
                            resource_bridge,
                        )
                        .run(queue_evts, kill_evt);
                    });

            match worker_result {
                Err(e) => {
                    error!("failed to spawn virtio_wl worker: {}", e);
                    return;
                }
                Ok(join_handle) => {
                    self.worker_thread = Some(join_handle);
                }
            }
        }
    }
}