1 // Copyright 2018 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 mod edid;
6 mod parameters;
7 mod protocol;
8 mod virtio_gpu;
9
10 use std::cell::RefCell;
11 use std::collections::BTreeMap;
12 use std::collections::VecDeque;
13 use std::io::Read;
14 use std::path::PathBuf;
15 use std::rc::Rc;
16 use std::sync::atomic::AtomicBool;
17 use std::sync::atomic::Ordering;
18 use std::sync::Arc;
19
20 use anyhow::anyhow;
21 use anyhow::Context;
22 use base::debug;
23 use base::error;
24 #[cfg(unix)]
25 use base::platform::move_task_to_cgroup;
26 use base::warn;
27 use base::AsRawDescriptor;
28 use base::Event;
29 use base::EventToken;
30 use base::RawDescriptor;
31 use base::Result;
32 use base::SafeDescriptor;
33 use base::SendTube;
34 use base::Tube;
35 use base::VmEventType;
36 use base::WaitContext;
37 use base::WorkerThread;
38 use data_model::*;
39 pub use gpu_display::EventDevice;
40 use gpu_display::*;
41 pub use parameters::GpuParameters;
42 use rutabaga_gfx::*;
43 use serde::Deserialize;
44 use serde::Serialize;
45 use sync::Mutex;
46 pub use vm_control::gpu::DisplayMode as GpuDisplayMode;
47 pub use vm_control::gpu::DisplayParameters as GpuDisplayParameters;
48 use vm_control::gpu::GpuControlCommand;
49 use vm_control::gpu::GpuControlResult;
50 pub use vm_control::gpu::DEFAULT_DISPLAY_HEIGHT;
51 pub use vm_control::gpu::DEFAULT_DISPLAY_WIDTH;
52 pub use vm_control::gpu::DEFAULT_REFRESH_RATE;
53 use vm_memory::GuestAddress;
54 use vm_memory::GuestMemory;
55 use zerocopy::AsBytes;
56
57 pub use self::protocol::virtio_gpu_config;
58 pub use self::protocol::VIRTIO_GPU_F_CONTEXT_INIT;
59 pub use self::protocol::VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
60 pub use self::protocol::VIRTIO_GPU_F_EDID;
61 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
62 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_SYNC;
63 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
64 pub use self::protocol::VIRTIO_GPU_F_VIRGL;
65 pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
66 use self::protocol::*;
67 pub use self::virtio_gpu::ProcessDisplayResult;
68 use self::virtio_gpu::VirtioGpu;
69 use super::copy_config;
70 pub use super::device_constants::gpu::QUEUE_SIZES;
71 use super::resource_bridge::ResourceRequest;
72 use super::resource_bridge::ResourceResponse;
73 use super::DescriptorChain;
74 use super::DeviceType;
75 use super::Interrupt;
76 use super::Queue;
77 use super::Reader;
78 use super::SharedMemoryMapper;
79 use super::SharedMemoryRegion;
80 use super::SignalableInterrupt;
81 use super::VirtioDevice;
82 use super::Writer;
83 use crate::Suspendable;
84
/// Rendering backend selection for the virtio-gpu device.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMode {
    /// 2D-only output, no 3D acceleration.
    #[serde(rename = "2d", alias = "2D")]
    Mode2D,
    /// 3D acceleration via virglrenderer (accepted on the command line as "3d").
    #[cfg(feature = "virgl_renderer")]
    #[serde(rename = "virglrenderer", alias = "3d", alias = "3D")]
    ModeVirglRenderer,
    /// 3D acceleration via gfxstream.
    #[cfg(feature = "gfxstream")]
    #[serde(rename = "gfxstream")]
    ModeGfxstream,
}
96
impl Default for GpuMode {
    fn default() -> Self {
        // Exactly one of the `return`s below survives cfg-expansion for any given
        // (platform, feature) combination: windows and unix are mutually exclusive,
        // and the final arm covers every combination the first two do not.
        #[cfg(all(windows, feature = "gfxstream"))]
        return GpuMode::ModeGfxstream;

        #[cfg(all(unix, feature = "virgl_renderer"))]
        return GpuMode::ModeVirglRenderer;

        #[cfg(not(any(
            all(windows, feature = "gfxstream"),
            all(unix, feature = "virgl_renderer"),
        )))]
        return GpuMode::Mode2D;
    }
}
112
/// Per-scanout layout description supplied by `SET_SCANOUT_BLOB` for a blob resource.
#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    pub drm_format: DrmFormat,
    // Per-plane strides/offsets; up to PLANE_INFO_MAX_COUNT planes, unused entries are 0.
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}
121
/// Identifies the fence timeline a fence belongs to: the device-global ring or a
/// (context, ring index)-specific ring.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum VirtioGpuRing {
    Global,
    ContextSpecific { ctx_id: u32, ring_idx: u8 },
}
127
/// A descriptor chain whose response is held back until its fence completes.
struct FenceDescriptor {
    // Ring the awaited fence was created on.
    ring: VirtioGpuRing,
    // Fence id this descriptor is waiting for.
    fence_id: u64,
    // Index of the descriptor chain to return to the guest once the fence completes.
    index: u16,
    // Length (in bytes) of the already-encoded response.
    len: u32,
}
134
/// Fence bookkeeping shared between the command processor and the fence completion handler.
#[derive(Default)]
pub struct FenceState {
    // Descriptors parked while waiting on a not-yet-completed fence.
    descs: Vec<FenceDescriptor>,
    // Highest completed fence id recorded per ring.
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
140
/// Abstraction over a virtio queue, allowing single-threaded or shared (cross-thread) access.
pub trait QueueReader {
    /// Pops the next available descriptor chain, if any.
    fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain>;
    /// Puts a descriptor chain on the used ring with `len` bytes written.
    fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32);
    /// Triggers this queue's interrupt to notify the guest of used descriptors.
    fn signal_used(&self, mem: &GuestMemory);
}
146
/// Single-threaded `QueueReader` backed by a `RefCell`.
struct LocalQueueReader {
    queue: RefCell<Queue>,
    interrupt: Interrupt,
}
151
152 impl LocalQueueReader {
new(queue: Queue, interrupt: Interrupt) -> Self153 fn new(queue: Queue, interrupt: Interrupt) -> Self {
154 Self {
155 queue: RefCell::new(queue),
156 interrupt,
157 }
158 }
159 }
160
161 impl QueueReader for LocalQueueReader {
pop(&self, mem: &GuestMemory) -> Option<DescriptorChain>162 fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain> {
163 self.queue.borrow_mut().pop(mem)
164 }
165
add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32)166 fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32) {
167 self.queue.borrow_mut().add_used(mem, desc_index, len)
168 }
169
signal_used(&self, mem: &GuestMemory)170 fn signal_used(&self, mem: &GuestMemory) {
171 self.queue
172 .borrow_mut()
173 .trigger_interrupt(mem, &self.interrupt);
174 }
175 }
176
/// Cloneable, thread-safe `QueueReader` backed by an `Arc<Mutex<_>>`; clones share one queue.
#[derive(Clone)]
struct SharedQueueReader {
    queue: Arc<Mutex<Queue>>,
    interrupt: Interrupt,
}
182
183 impl SharedQueueReader {
new(queue: Queue, interrupt: Interrupt) -> Self184 fn new(queue: Queue, interrupt: Interrupt) -> Self {
185 Self {
186 queue: Arc::new(Mutex::new(queue)),
187 interrupt,
188 }
189 }
190 }
191
192 impl QueueReader for SharedQueueReader {
pop(&self, mem: &GuestMemory) -> Option<DescriptorChain>193 fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain> {
194 self.queue.lock().pop(mem)
195 }
196
add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32)197 fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32) {
198 self.queue.lock().add_used(mem, desc_index, len)
199 }
200
signal_used(&self, mem: &GuestMemory)201 fn signal_used(&self, mem: &GuestMemory) {
202 self.queue.lock().trigger_interrupt(mem, &self.interrupt);
203 }
204 }
205
206 /// Initializes the virtio_gpu state tracker.
build( display_backends: &[DisplayBackend], display_params: Vec<GpuDisplayParameters>, display_event: Arc<AtomicBool>, rutabaga_builder: RutabagaBuilder, event_devices: Vec<EventDevice>, mapper: Box<dyn SharedMemoryMapper>, external_blob: bool, #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>, udmabuf: bool, fence_handler: RutabagaFenceHandler, rutabaga_server_descriptor: Option<SafeDescriptor>, ) -> Option<VirtioGpu>207 fn build(
208 display_backends: &[DisplayBackend],
209 display_params: Vec<GpuDisplayParameters>,
210 display_event: Arc<AtomicBool>,
211 rutabaga_builder: RutabagaBuilder,
212 event_devices: Vec<EventDevice>,
213 mapper: Box<dyn SharedMemoryMapper>,
214 external_blob: bool,
215 #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
216 udmabuf: bool,
217 fence_handler: RutabagaFenceHandler,
218 rutabaga_server_descriptor: Option<SafeDescriptor>,
219 ) -> Option<VirtioGpu> {
220 let mut display_opt = None;
221 for display_backend in display_backends {
222 match display_backend.build(
223 #[cfg(windows)]
224 wndproc_thread,
225 ) {
226 Ok(c) => {
227 display_opt = Some(c);
228 break;
229 }
230 Err(e) => error!("failed to open display: {}", e),
231 };
232 }
233
234 let display = match display_opt {
235 Some(d) => d,
236 None => {
237 error!("failed to open any displays");
238 return None;
239 }
240 };
241
242 VirtioGpu::new(
243 display,
244 display_params,
245 display_event,
246 rutabaga_builder,
247 event_devices,
248 mapper,
249 external_blob,
250 udmabuf,
251 fence_handler,
252 rutabaga_server_descriptor,
253 )
254 }
255
/// Create a handler that writes into the completed fence queue
///
/// When rutabaga reports a fence as completed, the handler returns every descriptor that was
/// parked waiting on a fence at or below the completed id on the same ring, then records the
/// new high-water mark for that ring.
pub fn create_fence_handler<Q>(
    mem: GuestMemory,
    ctrl_queue: Q,
    fence_state: Arc<Mutex<FenceState>>,
) -> RutabagaFenceHandler
where
    Q: QueueReader + Send + Clone + 'static,
{
    RutabagaFenceClosure::new(move |completed_fence| {
        let mut signal = false;

        {
            // A fence belongs to the global ring unless the RING_IDX flag selects a
            // per-context ring.
            let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                0 => VirtioGpuRing::Global,
                _ => VirtioGpuRing::ContextSpecific {
                    ctx_id: completed_fence.ctx_id,
                    ring_idx: completed_fence.ring_idx,
                },
            };

            let mut fence_state = fence_state.lock();
            // Return all descriptors whose fence (on this ring) is now complete;
            // keep everything else parked.
            fence_state.descs.retain(|f_desc| {
                if f_desc.ring == ring && f_desc.fence_id <= completed_fence.fence_id {
                    ctrl_queue.add_used(&mem, f_desc.index, f_desc.len);
                    signal = true;
                    return false;
                }
                true
            });
            // Update the last completed fence for this context
            fence_state
                .completed_fences
                .insert(ring, completed_fence.fence_id);
        }

        // The fence_state lock is released (scope end above) before signaling the queue.
        if signal {
            ctrl_queue.signal_used(&mem);
        }
    })
}
297
/// A descriptor index plus response length, ready to be placed on a queue's used ring.
pub struct ReturnDescriptor {
    pub index: u16,
    pub len: u32,
}
302
/// Decodes guest GPU commands and drives the `VirtioGpu` state tracker.
pub struct Frontend {
    // Fence bookkeeping shared with the fence completion handler.
    fence_state: Arc<Mutex<FenceState>>,
    // Cursor-queue descriptors waiting to be returned to the guest.
    return_cursor_descriptors: VecDeque<ReturnDescriptor>,
    virtio_gpu: VirtioGpu,
}
308
309 impl Frontend {
new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend310 fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
311 Frontend {
312 fence_state,
313 return_cursor_descriptors: Default::default(),
314 virtio_gpu,
315 }
316 }
317
    /// Returns the internal connection to the compositor and its associated state.
    pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
        self.virtio_gpu.display()
    }
322
    /// Processes the internal `display` events and returns `true` if any display was closed.
    /// (See `ProcessDisplayResult` for the full set of outcomes the caller must handle.)
    pub fn process_display(&mut self) -> ProcessDisplayResult {
        self.virtio_gpu.process_display()
    }
327
328 /// Processes incoming requests on `resource_bridge`.
process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()>329 pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
330 let response = match resource_bridge.recv() {
331 Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
332 Ok(ResourceRequest::GetFence { seqno }) => {
333 // The seqno originated from self.backend, so it should fit in a u32.
334 match u32::try_from(seqno) {
335 Ok(fence_id) => self.virtio_gpu.export_fence(fence_id),
336 Err(_) => ResourceResponse::Invalid,
337 }
338 }
339 Err(e) => return Err(e).context("Error receiving resource bridge request"),
340 };
341
342 resource_bridge
343 .send(&response)
344 .context("Error sending resource bridge response")?;
345
346 Ok(())
347 }
348
    /// Processes the GPU control command and returns the result. A
    /// `GpuControlResult::DisplaysUpdated` result indicates the device's config changed and
    /// the caller should raise a config-changed interrupt.
    pub fn process_gpu_control_command(&mut self, cmd: GpuControlCommand) -> GpuControlResult {
        self.virtio_gpu.process_gpu_control_command(cmd)
    }
354
process_gpu_command( &mut self, mem: &GuestMemory, cmd: GpuCommand, reader: &mut Reader, ) -> VirtioGpuResult355 fn process_gpu_command(
356 &mut self,
357 mem: &GuestMemory,
358 cmd: GpuCommand,
359 reader: &mut Reader,
360 ) -> VirtioGpuResult {
361 self.virtio_gpu.force_ctx_0();
362
363 match cmd {
364 GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
365 self.virtio_gpu.display_info().to_vec(),
366 )),
367 GpuCommand::ResourceCreate2d(info) => {
368 let resource_id = info.resource_id.to_native();
369
370 let resource_create_3d = ResourceCreate3D {
371 target: RUTABAGA_PIPE_TEXTURE_2D,
372 format: info.format.to_native(),
373 bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
374 width: info.width.to_native(),
375 height: info.height.to_native(),
376 depth: 1,
377 array_size: 1,
378 last_level: 0,
379 nr_samples: 0,
380 flags: 0,
381 };
382
383 self.virtio_gpu
384 .resource_create_3d(resource_id, resource_create_3d)
385 }
386 GpuCommand::ResourceUnref(info) => {
387 self.virtio_gpu.unref_resource(info.resource_id.to_native())
388 }
389 GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
390 info.scanout_id.to_native(),
391 info.resource_id.to_native(),
392 None,
393 ),
394 GpuCommand::ResourceFlush(info) => {
395 self.virtio_gpu.flush_resource(info.resource_id.to_native())
396 }
397 GpuCommand::TransferToHost2d(info) => {
398 let resource_id = info.resource_id.to_native();
399 let transfer = Transfer3D::new_2d(
400 info.r.x.to_native(),
401 info.r.y.to_native(),
402 info.r.width.to_native(),
403 info.r.height.to_native(),
404 );
405 self.virtio_gpu.transfer_write(0, resource_id, transfer)
406 }
407 GpuCommand::ResourceAttachBacking(info) => {
408 let available_bytes = reader.available_bytes();
409 if available_bytes != 0 {
410 let entry_count = info.nr_entries.to_native() as usize;
411 let mut vecs = Vec::with_capacity(entry_count);
412 for _ in 0..entry_count {
413 match reader.read_obj::<virtio_gpu_mem_entry>() {
414 Ok(entry) => {
415 let addr = GuestAddress(entry.addr.to_native());
416 let len = entry.length.to_native() as usize;
417 vecs.push((addr, len))
418 }
419 Err(_) => return Err(GpuResponse::ErrUnspec),
420 }
421 }
422 self.virtio_gpu
423 .attach_backing(info.resource_id.to_native(), mem, vecs)
424 } else {
425 error!("missing data for command {:?}", cmd);
426 Err(GpuResponse::ErrUnspec)
427 }
428 }
429 GpuCommand::ResourceDetachBacking(info) => {
430 self.virtio_gpu.detach_backing(info.resource_id.to_native())
431 }
432 GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
433 info.resource_id.to_native(),
434 info.pos.scanout_id.to_native(),
435 info.pos.x.into(),
436 info.pos.y.into(),
437 ),
438 GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
439 info.pos.scanout_id.to_native(),
440 info.pos.x.into(),
441 info.pos.y.into(),
442 ),
443 GpuCommand::ResourceAssignUuid(info) => {
444 let resource_id = info.resource_id.to_native();
445 self.virtio_gpu.resource_assign_uuid(resource_id)
446 }
447 GpuCommand::GetCapsetInfo(info) => self
448 .virtio_gpu
449 .get_capset_info(info.capset_index.to_native()),
450 GpuCommand::GetCapset(info) => self
451 .virtio_gpu
452 .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
453 GpuCommand::CtxCreate(info) => {
454 let context_name: Option<String> = String::from_utf8(info.debug_name.to_vec()).ok();
455 self.virtio_gpu.create_context(
456 info.hdr.ctx_id.to_native(),
457 info.context_init.to_native(),
458 context_name.as_deref(),
459 )
460 }
461 GpuCommand::CtxDestroy(info) => {
462 self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
463 }
464 GpuCommand::CtxAttachResource(info) => self
465 .virtio_gpu
466 .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
467 GpuCommand::CtxDetachResource(info) => self
468 .virtio_gpu
469 .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
470 GpuCommand::ResourceCreate3d(info) => {
471 let resource_id = info.resource_id.to_native();
472 let resource_create_3d = ResourceCreate3D {
473 target: info.target.to_native(),
474 format: info.format.to_native(),
475 bind: info.bind.to_native(),
476 width: info.width.to_native(),
477 height: info.height.to_native(),
478 depth: info.depth.to_native(),
479 array_size: info.array_size.to_native(),
480 last_level: info.last_level.to_native(),
481 nr_samples: info.nr_samples.to_native(),
482 flags: info.flags.to_native(),
483 };
484
485 self.virtio_gpu
486 .resource_create_3d(resource_id, resource_create_3d)
487 }
488 GpuCommand::TransferToHost3d(info) => {
489 let ctx_id = info.hdr.ctx_id.to_native();
490 let resource_id = info.resource_id.to_native();
491
492 let transfer = Transfer3D {
493 x: info.box_.x.to_native(),
494 y: info.box_.y.to_native(),
495 z: info.box_.z.to_native(),
496 w: info.box_.w.to_native(),
497 h: info.box_.h.to_native(),
498 d: info.box_.d.to_native(),
499 level: info.level.to_native(),
500 stride: info.stride.to_native(),
501 layer_stride: info.layer_stride.to_native(),
502 offset: info.offset.to_native(),
503 };
504
505 self.virtio_gpu
506 .transfer_write(ctx_id, resource_id, transfer)
507 }
508 GpuCommand::TransferFromHost3d(info) => {
509 let ctx_id = info.hdr.ctx_id.to_native();
510 let resource_id = info.resource_id.to_native();
511
512 let transfer = Transfer3D {
513 x: info.box_.x.to_native(),
514 y: info.box_.y.to_native(),
515 z: info.box_.z.to_native(),
516 w: info.box_.w.to_native(),
517 h: info.box_.h.to_native(),
518 d: info.box_.d.to_native(),
519 level: info.level.to_native(),
520 stride: info.stride.to_native(),
521 layer_stride: info.layer_stride.to_native(),
522 offset: info.offset.to_native(),
523 };
524
525 self.virtio_gpu
526 .transfer_read(ctx_id, resource_id, transfer, None)
527 }
528 GpuCommand::CmdSubmit3d(info) => {
529 if reader.available_bytes() != 0 {
530 let cmd_size = info.size.to_native() as usize;
531 let mut cmd_buf = vec![0; cmd_size];
532 if reader.read_exact(&mut cmd_buf[..]).is_ok() {
533 self.virtio_gpu
534 .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
535 } else {
536 Err(GpuResponse::ErrInvalidParameter)
537 }
538 } else {
539 // Silently accept empty command buffers to allow for
540 // benchmarking.
541 Ok(GpuResponse::OkNoData)
542 }
543 }
544 GpuCommand::ResourceCreateBlob(info) => {
545 let resource_id = info.resource_id.to_native();
546 let ctx_id = info.hdr.ctx_id.to_native();
547
548 let resource_create_blob = ResourceCreateBlob {
549 blob_mem: info.blob_mem.to_native(),
550 blob_flags: info.blob_flags.to_native(),
551 blob_id: info.blob_id.to_native(),
552 size: info.size.to_native(),
553 };
554
555 let entry_count = info.nr_entries.to_native();
556 if reader.available_bytes() == 0 && entry_count > 0 {
557 return Err(GpuResponse::ErrUnspec);
558 }
559
560 let mut vecs = Vec::with_capacity(entry_count as usize);
561 for _ in 0..entry_count {
562 match reader.read_obj::<virtio_gpu_mem_entry>() {
563 Ok(entry) => {
564 let addr = GuestAddress(entry.addr.to_native());
565 let len = entry.length.to_native() as usize;
566 vecs.push((addr, len))
567 }
568 Err(_) => return Err(GpuResponse::ErrUnspec),
569 }
570 }
571
572 self.virtio_gpu.resource_create_blob(
573 ctx_id,
574 resource_id,
575 resource_create_blob,
576 vecs,
577 mem,
578 )
579 }
580 GpuCommand::SetScanoutBlob(info) => {
581 let scanout_id = info.scanout_id.to_native();
582 let resource_id = info.resource_id.to_native();
583 let virtio_gpu_format = info.format.to_native();
584 let width = info.width.to_native();
585 let height = info.width.to_native();
586 let mut strides: [u32; 4] = [0; 4];
587 let mut offsets: [u32; 4] = [0; 4];
588
589 // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
590 // the following may have to change too.
591 let drm_format = match virtio_gpu_format {
592 VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
593 VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
594 _ => {
595 error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
596 return Err(GpuResponse::ErrUnspec);
597 }
598 };
599
600 for plane_index in 0..PLANE_INFO_MAX_COUNT {
601 offsets[plane_index] = info.offsets[plane_index].to_native();
602 strides[plane_index] = info.strides[plane_index].to_native();
603 }
604
605 let scanout = VirtioScanoutBlobData {
606 width,
607 height,
608 drm_format,
609 strides,
610 offsets,
611 };
612
613 self.virtio_gpu
614 .set_scanout(scanout_id, resource_id, Some(scanout))
615 }
616 GpuCommand::ResourceMapBlob(info) => {
617 let resource_id = info.resource_id.to_native();
618 let offset = info.offset.to_native();
619 self.virtio_gpu.resource_map_blob(resource_id, offset)
620 }
621 GpuCommand::ResourceUnmapBlob(info) => {
622 let resource_id = info.resource_id.to_native();
623 self.virtio_gpu.resource_unmap_blob(resource_id)
624 }
625 GpuCommand::GetEdid(info) => self.virtio_gpu.get_edid(info.scanout.to_native()),
626 }
627 }
628
    /// Processes virtio messages on `queue`.
    ///
    /// Drains every available descriptor chain. Returns `true` if any descriptor was added to
    /// the used ring, in which case the caller is responsible for signaling the guest.
    pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
        let mut signal_used = false;
        while let Some(desc) = queue.pop(mem) {
            match (
                Reader::new(mem.clone(), desc.clone()),
                Writer::new(mem.clone(), desc.clone()),
            ) {
                (Ok(mut reader), Ok(mut writer)) => {
                    // `None` means the response is deferred until a fence completes; the
                    // descriptor is then returned by the fence handler instead.
                    if let Some(ret_desc) =
                        self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
                    {
                        queue.add_used(mem, ret_desc.index, ret_desc.len);
                        signal_used = true;
                    }
                }
                (_, Err(e)) | (Err(e), _) => {
                    // Malformed chain: return it with zero bytes written rather than leaking it.
                    debug!("invalid descriptor: {}", e);
                    queue.add_used(mem, desc.index, 0);
                    signal_used = true;
                }
            }
        }

        signal_used
    }
655
    /// Decodes and executes one descriptor chain.
    ///
    /// Returns `Some(ReturnDescriptor)` when the descriptor can be returned to the guest now,
    /// or `None` when the response must wait for a fence to complete (the descriptor is then
    /// parked in `fence_state` and returned by the fence handler).
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        desc_index: u16,
        reader: &mut Reader,
        writer: &mut Writer,
    ) -> Option<ReturnDescriptor> {
        let mut resp = Err(GpuResponse::ErrUnspec);
        let mut gpu_cmd = None;
        let mut len = 0;
        match GpuCommand::decode(reader) {
            Ok(cmd) => {
                resp = self.process_gpu_command(mem, cmd, reader);
                gpu_cmd = Some(cmd);
            }
            Err(e) => debug!("descriptor decode error: {}", e),
        }

        let mut gpu_response = match resp {
            Ok(gpu_response) => gpu_response,
            Err(gpu_response) => {
                debug!("{:?} -> {:?}", gpu_cmd, gpu_response);
                gpu_response
            }
        };

        if writer.available_bytes() != 0 {
            let mut fence_id = 0;
            let mut ctx_id = 0;
            let mut flags = 0;
            let mut ring_idx = 0;
            if let Some(cmd) = gpu_cmd {
                let ctrl_hdr = cmd.ctrl_hdr();
                // The guest asked for a fence on this command: create it before encoding
                // the response so completion can be tracked.
                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                    flags = ctrl_hdr.flags.to_native();
                    fence_id = ctrl_hdr.fence_id.to_native();
                    ctx_id = ctrl_hdr.ctx_id.to_native();
                    ring_idx = ctrl_hdr.ring_idx;

                    let fence = RutabagaFence {
                        flags,
                        fence_id,
                        ctx_id,
                        ring_idx,
                    };
                    gpu_response = match self.virtio_gpu.create_fence(fence) {
                        Ok(_) => gpu_response,
                        Err(fence_resp) => {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            fence_resp
                        }
                    };
                }
            }

            // Prepare the response now, even if it is going to wait until
            // fence is complete.
            match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
                Ok(l) => len = l,
                Err(e) => debug!("ctrl queue response encode error: {}", e),
            }

            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
                };

                // In case the fence is signaled immediately after creation, don't add a return
                // FenceDescriptor.
                let mut fence_state = self.fence_state.lock();
                if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
                    fence_state.descs.push(FenceDescriptor {
                        ring,
                        fence_id,
                        index: desc_index,
                        len,
                    });

                    return None;
                }
            }

            // No fence (or already completed fence), respond now.
        }
        Some(ReturnDescriptor {
            index: desc_index,
            len,
        })
    }
746
    /// Pops the next cursor-queue descriptor that is ready to be returned to the guest.
    pub fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
        self.return_cursor_descriptors.pop_front()
    }
750
    /// Forwards to `VirtioGpu::event_poll`; invoked when the worker observes
    /// `WorkerToken::VirtioGpuPoll` becoming readable.
    pub fn event_poll(&self) {
        self.virtio_gpu.event_poll();
    }
754 }
755
/// Wait-context tokens for the events the GPU worker thread reacts to.
#[derive(EventToken, PartialEq, Eq, Clone, Copy, Debug)]
enum WorkerToken {
    // The control queue has pending descriptors.
    CtrlQueue,
    // The cursor queue has pending descriptors.
    CursorQueue,
    // The display connection has pending events.
    Display,
    // A GPU control request arrived on the control tube.
    #[cfg(unix)]
    GpuControl,
    // The interrupt resample event fired.
    InterruptResample,
    // The worker was asked to shut down.
    Kill,
    // The resource bridge at `index` has a pending request.
    ResourceBridge {
        index: usize,
    },
    // The rutabaga poll descriptor became readable.
    VirtioGpuPoll,
}
770
/// Wraps a `WaitContext`, additionally remembering registered (descriptor, token) pairs so
/// triggers can later be removed by token alone.
struct EventManager<'a> {
    pub wait_ctx: WaitContext<WorkerToken>,
    // Mirror of everything currently registered in `wait_ctx`.
    events: Vec<(&'a dyn AsRawDescriptor, WorkerToken)>,
}
775
776 impl<'a> EventManager<'a> {
new() -> Result<EventManager<'a>>777 pub fn new() -> Result<EventManager<'a>> {
778 Ok(EventManager {
779 wait_ctx: WaitContext::new()?,
780 events: vec![],
781 })
782 }
783
build_with( triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)], ) -> Result<EventManager<'a>>784 pub fn build_with(
785 triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)],
786 ) -> Result<EventManager<'a>> {
787 let mut manager = EventManager::new()?;
788 manager.wait_ctx.add_many(triggers)?;
789
790 for (descriptor, token) in triggers {
791 manager.events.push((*descriptor, *token));
792 }
793 Ok(manager)
794 }
795
add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()>796 pub fn add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()> {
797 self.wait_ctx.add(descriptor, token)?;
798 self.events.push((descriptor, token));
799 Ok(())
800 }
801
delete(&mut self, token: WorkerToken)802 pub fn delete(&mut self, token: WorkerToken) {
803 self.events.retain(|event| {
804 if event.1 == token {
805 self.wait_ctx.delete(event.0).ok();
806 return false;
807 }
808 true
809 });
810 }
811 }
812
/// State owned by the GPU device's worker thread while its event loop runs.
struct Worker {
    interrupt: Interrupt,
    // Tube used to request a VM exit (e.g. when the display window is closed).
    exit_evt_wrtube: SendTube,
    #[cfg(unix)]
    gpu_control_tube: Tube,
    mem: GuestMemory,
    // Control queue; shared so the fence handler can also return descriptors on it.
    ctrl_queue: SharedQueueReader,
    ctrl_evt: Event,
    // Cursor queue; only touched on the worker thread.
    cursor_queue: LocalQueueReader,
    cursor_evt: Event,
    resource_bridges: ResourceBridges,
    kill_evt: Event,
    state: Frontend,
}
827
impl Worker {
    /// Runs the worker event loop until a kill request, a wait failure, or a control tube
    /// error terminates it.
    fn run(&mut self) {
        // The display is polled through its raw descriptor; without it the worker cannot
        // service display events, so bail out early.
        let display_desc =
            match SafeDescriptor::try_from(&*self.state.display().borrow() as &dyn AsRawDescriptor)
            {
                Ok(v) => v,
                Err(e) => {
                    error!("failed getting event descriptor for display: {}", e);
                    return;
                }
            };

        let mut event_manager = match EventManager::build_with(&[
            (&self.ctrl_evt, WorkerToken::CtrlQueue),
            (&self.cursor_evt, WorkerToken::CursorQueue),
            (&display_desc, WorkerToken::Display),
            #[cfg(unix)]
            (&self.gpu_control_tube, WorkerToken::GpuControl),
            (&self.kill_evt, WorkerToken::Kill),
        ]) {
            Ok(v) => v,
            Err(e) => {
                error!("failed creating WaitContext: {}", e);
                return;
            }
        };

        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            if let Err(e) = event_manager.add(resample_evt, WorkerToken::InterruptResample) {
                error!(
                    "failed adding interrupt resample event to WaitContext: {}",
                    e
                );
                return;
            }
        }

        // Declared outside the `if` so the descriptor outlives its registration in the
        // wait context.
        let poll_desc: SafeDescriptor;
        if let Some(desc) = self.state.virtio_gpu.poll_descriptor() {
            poll_desc = desc;
            if let Err(e) = event_manager.add(&poll_desc, WorkerToken::VirtioGpuPoll) {
                error!("failed adding poll event to WaitContext: {}", e);
                return;
            }
        }

        self.resource_bridges
            .add_to_wait_context(&mut event_manager.wait_ctx);

        // TODO(davidriley): The entire main loop processing is somewhat racey and incorrect with
        // respect to cursor vs control queue processing. As both currently and originally
        // written, while the control queue is only processed/read from after the the cursor queue
        // is finished, the entire queue will be processed at that time. The end effect of this
        // racyiness is that control queue descriptors that are issued after cursors descriptors
        // might be handled first instead of the other way around. In practice, the cursor queue
        // isn't used so this isn't a huge issue.

        'wait: loop {
            let events = match event_manager.wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };
            let mut signal_used_cursor = false;
            let mut signal_used_ctrl = false;
            let mut ctrl_available = false;
            let mut display_available = false;
            let mut needs_config_interrupt = false;

            // Remove event triggers that have been hung-up to prevent unnecessary worker wake-ups
            // (see b/244486346#comment62 for context).
            for event in events.iter().filter(|e| e.is_hungup) {
                error!(
                    "unhandled virtio-gpu worker event hang-up detected: {:?}",
                    event.token
                );
                event_manager.delete(event.token);
            }

            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    WorkerToken::CtrlQueue => {
                        let _ = self.ctrl_evt.wait();
                        // Set flag that control queue is available to be read, but defer reading
                        // until rest of the events are processed.
                        ctrl_available = true;
                    }
                    WorkerToken::CursorQueue => {
                        let _ = self.cursor_evt.wait();
                        if self.state.process_queue(&self.mem, &self.cursor_queue) {
                            signal_used_cursor = true;
                        }
                    }
                    WorkerToken::Display => {
                        // We only need to process_display once-per-wake, regardless of how many
                        // WorkerToken::Display events are received.
                        display_available = true;
                    }
                    #[cfg(unix)]
                    WorkerToken::GpuControl => {
                        let req = match self.gpu_control_tube.recv() {
                            Ok(req) => req,
                            Err(e) => {
                                error!("gpu control socket failed recv: {:?}", e);
                                break 'wait;
                            }
                        };

                        let resp = self.state.process_gpu_control_command(req);

                        // A display change requires a config-changed interrupt after the
                        // response is sent.
                        if let GpuControlResult::DisplaysUpdated = resp {
                            needs_config_interrupt = true;
                        }

                        if let Err(e) = self.gpu_control_tube.send(&resp) {
                            error!("display control socket failed send: {}", e);
                            break 'wait;
                        }
                    }
                    WorkerToken::ResourceBridge { index } => {
                        self.resource_bridges.set_should_process(index);
                    }
                    WorkerToken::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                    WorkerToken::VirtioGpuPoll => {
                        self.state.event_poll();
                    }
                    WorkerToken::Kill => {
                        break 'wait;
                    }
                }
            }

            // All cursor commands go first because they have higher priority.
            while let Some(desc) = self.state.return_cursor() {
                self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
                signal_used_cursor = true;
            }

            if display_available {
                match self.state.process_display() {
                    ProcessDisplayResult::CloseRequested => {
                        let _ = self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit);
                    }
                    ProcessDisplayResult::Error(_e) => {
                        base::error!("Display processing failed, disabling display event handler.");
                        event_manager.delete(WorkerToken::Display);
                    }
                    ProcessDisplayResult::Success => (),
                };
            }

            if ctrl_available && self.state.process_queue(&self.mem, &self.ctrl_queue) {
                signal_used_ctrl = true;
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            // TODO(davidriley): This is still inherently racey if both the control queue request
            // and the resource bridge request come in at the same time after the control queue is
            // processed above and before the corresponding bridge is processed below.
            self.resource_bridges
                .process_resource_bridges(&mut self.state, &mut event_manager.wait_ctx);

            if signal_used_ctrl {
                self.ctrl_queue.signal_used(&self.mem);
            }

            if signal_used_cursor {
                self.cursor_queue.signal_used(&self.mem);
            }

            if needs_config_interrupt {
                self.interrupt.signal_config_changed();
            }
        }
    }
}
1010
/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work. Variants are cfg-gated per platform.
#[derive(Clone)]
pub enum DisplayBackend {
    #[cfg(unix)]
    /// Use the wayland backend with the given socket path if given.
    Wayland(Option<PathBuf>),
    #[cfg(unix)]
    /// Open a connection to the X server at the given display if given.
    X(Option<String>),
    /// Emulate a display without actually displaying it.
    Stub,
    #[cfg(windows)]
    /// Open a window using WinAPI.
    WinApi(WinDisplayProperties),
}
1029
impl DisplayBackend {
    /// Attempts to open a display connection for this backend, returning the resulting
    /// [`GpuDisplay`] or the error from the underlying display library.
    ///
    /// On Windows, the WinAPI backend consumes (`take`s) `wndproc_thread`, so only the
    /// first successful WinAPI build can use it; a subsequent call with an emptied
    /// `Option` fails with `GpuDisplayError::Allocate`.
    fn build(
        &self,
        #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
    ) -> std::result::Result<GpuDisplay, GpuDisplayError> {
        match self {
            #[cfg(unix)]
            DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
            #[cfg(unix)]
            DisplayBackend::X(display) => GpuDisplay::open_x(display.as_ref()),
            DisplayBackend::Stub => GpuDisplay::open_stub(),
            #[cfg(windows)]
            DisplayBackend::WinApi(display_properties) => match wndproc_thread.take() {
                Some(wndproc_thread) => GpuDisplay::open_winapi(
                    wndproc_thread,
                    /* win_metrics= */ None,
                    display_properties.clone(),
                ),
                None => {
                    // The window procedure thread was already consumed by an earlier build.
                    error!("wndproc_thread is none");
                    Err(GpuDisplayError::Allocate)
                }
            },
        }
    }
}
1056
/// The virtio-gpu device. Fields held as `Option` are moved out exactly once when the
/// worker thread is started (see `activate()` / `initialize_frontend()`).
pub struct Gpu {
    // Tube used to request VMM-level events (e.g. exit) on behalf of the guest.
    exit_evt_wrtube: SendTube,
    #[cfg(unix)]
    // Tube for GPU control commands; taken by the worker at activation.
    gpu_control_tube: Option<Tube>,
    // Shared-memory mapper installed via set_shared_memory_mapper(); taken at activation.
    mapper: Option<Box<dyn SharedMemoryMapper>>,
    // Resource bridge tubes; taken by the worker at activation.
    resource_bridges: Option<ResourceBridges>,
    // Input event devices handed to the display; drained at activation.
    event_devices: Vec<EventDevice>,
    // Handle to the running worker thread, if activated; joined on drop.
    worker_thread: Option<WorkerThread<()>>,
    // Ordered list of display backends to try, first working one wins.
    display_backends: Vec<DisplayBackend>,
    // Per-display parameters; guaranteed non-empty after construction.
    display_params: Vec<GpuDisplayParameters>,
    // Set when a display event is pending; reported via the config space events_read field.
    display_event: Arc<AtomicBool>,
    // Builder for the rutabaga rendering stack; consumed once when the frontend is built.
    rutabaga_builder: Option<RutabagaBuilder>,
    // Size of the host-visible PCI BAR exposed as a shared memory region.
    pci_bar_size: u64,
    external_blob: bool,
    // Which rendering component (2D / virglrenderer / gfxstream) is in use.
    rutabaga_component: RutabagaComponentType,
    #[cfg(windows)]
    // Window procedure thread, consumed by the WinAPI display backend.
    wndproc_thread: Option<WindowProcedureThread>,
    // Virtio feature bits common to all devices, OR'd with GPU-specific bits in features().
    base_features: u64,
    udmabuf: bool,
    // Connection to the out-of-process render server, if one is used.
    rutabaga_server_descriptor: Option<SafeDescriptor>,
    // Bitmask of explicitly requested capsets; 0 means "derive from component".
    capset_mask: u64,
    #[cfg(unix)]
    // cgroup the worker thread should be moved into, if requested.
    gpu_cgroup_path: Option<PathBuf>,
}
1081
impl Gpu {
    /// Constructs a new virtio-gpu device.
    ///
    /// The device is inert until `activate()` runs; resources the worker thread will
    /// need later (control tube, resource bridges, rutabaga builder, render server
    /// descriptor) are stored as `Option`s so they can be moved out exactly once.
    pub fn new(
        exit_evt_wrtube: SendTube,
        #[cfg(unix)] gpu_control_tube: Tube,
        resource_bridges: Vec<Tube>,
        display_backends: Vec<DisplayBackend>,
        gpu_parameters: &GpuParameters,
        rutabaga_server_descriptor: Option<SafeDescriptor>,
        event_devices: Vec<EventDevice>,
        external_blob: bool,
        system_blob: bool,
        base_features: u64,
        channels: BTreeMap<String, PathBuf>,
        #[cfg(windows)] wndproc_thread: WindowProcedureThread,
        #[cfg(unix)] gpu_cgroup_path: Option<&PathBuf>,
    ) -> Gpu {
        // Guarantee at least one display; the first display's size seeds the
        // rutabaga builder below.
        let mut display_params = gpu_parameters.display_params.clone();
        if display_params.is_empty() {
            display_params.push(Default::default());
        }
        let (display_width, display_height) = display_params[0].get_virtual_display_size();

        // Translate the name -> socket-path channel map into rutabaga channel descriptors.
        let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
        for (channel_name, path) in &channels {
            match &channel_name[..] {
                // An unnamed channel is the Wayland channel.
                "" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
                }),
                "mojo" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
                }),
                // Unknown channel names are logged and skipped rather than failing
                // device construction.
                _ => error!("unknown rutabaga channel"),
            }
        }

        let rutabaga_channels_opt = Some(rutabaga_channels);
        // Select the rendering component; the 3D backends are compile-time optional,
        // so their match arms only exist when the corresponding feature is enabled.
        let component = match gpu_parameters.mode {
            GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
            #[cfg(feature = "virgl_renderer")]
            GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
            #[cfg(feature = "gfxstream")]
            GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
        };

        // Presence of a server descriptor implies the out-of-process render server is used.
        let use_render_server = rutabaga_server_descriptor.is_some();

        let rutabaga_builder = RutabagaBuilder::new(component, gpu_parameters.capset_mask)
            .set_display_width(display_width)
            .set_display_height(display_height)
            .set_rutabaga_channels(rutabaga_channels_opt)
            .set_use_egl(gpu_parameters.renderer_use_egl)
            .set_use_gles(gpu_parameters.renderer_use_gles)
            .set_use_glx(gpu_parameters.renderer_use_glx)
            .set_use_surfaceless(gpu_parameters.renderer_use_surfaceless)
            .set_use_vulkan(gpu_parameters.use_vulkan.unwrap_or_default())
            .set_wsi(gpu_parameters.wsi.as_ref())
            .set_use_external_blob(external_blob)
            .set_use_system_blob(system_blob)
            .set_use_render_server(use_render_server);

        // gfxstream-only knobs; unset options fall back to their defaults.
        #[cfg(feature = "gfxstream")]
        let rutabaga_builder = rutabaga_builder
            .set_use_guest_angle(gpu_parameters.gfxstream_use_guest_angle.unwrap_or_default())
            .set_support_gles31(gpu_parameters.gfxstream_support_gles31.unwrap_or_default());

        Gpu {
            exit_evt_wrtube,
            #[cfg(unix)]
            gpu_control_tube: Some(gpu_control_tube),
            mapper: None,
            resource_bridges: Some(ResourceBridges::new(resource_bridges)),
            event_devices,
            worker_thread: None,
            display_backends,
            display_params,
            display_event: Arc::new(AtomicBool::new(false)),
            rutabaga_builder: Some(rutabaga_builder),
            pci_bar_size: gpu_parameters.pci_bar_size,
            external_blob,
            rutabaga_component: component,
            #[cfg(windows)]
            wndproc_thread: Some(wndproc_thread),
            base_features,
            udmabuf: gpu_parameters.udmabuf,
            rutabaga_server_descriptor,
            capset_mask: gpu_parameters.capset_mask,
            #[cfg(unix)]
            gpu_cgroup_path: gpu_cgroup_path.cloned(),
        }
    }

    /// Initializes the internal device state so that it can begin processing virtqueues.
    ///
    /// Consumes the stored `rutabaga_builder`, `rutabaga_server_descriptor` and
    /// `event_devices`; returns `None` if the builder was already taken (i.e. the
    /// frontend was already initialized) or if building the virtio-gpu backend fails.
    pub fn initialize_frontend(
        &mut self,
        fence_state: Arc<Mutex<FenceState>>,
        fence_handler: RutabagaFenceHandler,
        mapper: Box<dyn SharedMemoryMapper>,
    ) -> Option<Frontend> {
        let rutabaga_builder = self.rutabaga_builder.take()?;
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.take();
        // split_off(0) drains the vector, leaving self.event_devices empty.
        let event_devices = self.event_devices.split_off(0);

        build(
            &self.display_backends,
            self.display_params.clone(),
            self.display_event.clone(),
            rutabaga_builder,
            event_devices,
            mapper,
            self.external_blob,
            #[cfg(windows)]
            &mut self.wndproc_thread,
            self.udmabuf,
            fence_handler,
            rutabaga_server_descriptor,
        )
        .map(|vgpu| Frontend::new(vgpu, fence_state))
    }

    // Builds the current virtio-gpu config space contents: pending display events,
    // the scanout count, and the number of advertised capsets.
    fn get_config(&self) -> virtio_gpu_config {
        let mut events_read = 0;

        // Report a pending display event until the driver clears it via write_config().
        if self.display_event.load(Ordering::Relaxed) {
            events_read |= VIRTIO_GPU_EVENT_DISPLAY;
        }

        let num_capsets = match self.capset_mask {
            0 => {
                // No explicit mask: derive the capset count from the compiled-in
                // rendering components.
                match self.rutabaga_component {
                    RutabagaComponentType::Rutabaga2D => 0,
                    _ => {
                        #[allow(unused_mut)]
                        let mut num_capsets = 0;

                        // Three capsets for virgl_renderer
                        #[cfg(feature = "virgl_renderer")]
                        {
                            num_capsets += 3;
                        }

                        // One capset for gfxstream
                        #[cfg(feature = "gfxstream")]
                        {
                            num_capsets += 1;
                        }

                        num_capsets
                    }
                }
            }
            // Explicit mask: one capset per set bit.
            _ => self.capset_mask.count_ones(),
        };

        virtio_gpu_config {
            events_read: Le32::from(events_read),
            events_clear: Le32::from(0),
            num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32),
            num_capsets: Le32::from(num_capsets),
        }
    }

    /// Send a request to exit the process to VMM.
    pub fn send_exit_evt(&self) -> anyhow::Result<()> {
        self.exit_evt_wrtube
            .send::<VmEventType>(&VmEventType::Exit)
            .context("failed to send exit event")
    }
}
1252
1253 impl Drop for Gpu {
drop(&mut self)1254 fn drop(&mut self) {
1255 if let Some(worker_thread) = self.worker_thread.take() {
1256 worker_thread.stop();
1257 }
1258 }
1259 }
1260
impl VirtioDevice for Gpu {
    // Returns every raw descriptor that must survive sandboxing/minijail for the
    // device to keep working.
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        // To find the RawDescriptor associated with stdout and stderr on Windows is difficult.
        // Resource bridges are used only for Wayland displays. There is also no meaningful way
        // casting the underlying DMA buffer wrapped in File to a copyable RawDescriptor.
        // TODO(davidriley): Remove once virgl has another path to include
        // debugging logs.
        #[cfg(unix)]
        if cfg!(debug_assertions) {
            keep_rds.push(libc::STDOUT_FILENO);
            keep_rds.push(libc::STDERR_FILENO);
        }

        if let Some(ref mapper) = self.mapper {
            if let Some(descriptor) = mapper.as_raw_descriptor() {
                keep_rds.push(descriptor);
            }
        }

        if let Some(ref rutabaga_server_descriptor) = self.rutabaga_server_descriptor {
            keep_rds.push(rutabaga_server_descriptor.as_raw_descriptor());
        }

        keep_rds.push(self.exit_evt_wrtube.as_raw_descriptor());

        #[cfg(unix)]
        if let Some(gpu_control_tube) = &self.gpu_control_tube {
            keep_rds.push(gpu_control_tube.as_raw_descriptor());
        }

        if let Some(resource_bridges) = &self.resource_bridges {
            resource_bridges.append_raw_descriptors(&mut keep_rds);
        }

        keep_rds
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Gpu
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    // Advertised virtio feature bits: base features plus GPU-specific bits, with the
    // 3D-related bits only offered when a non-2D component or explicit capset mask is set.
    fn features(&self) -> u64 {
        let mut virtio_gpu_features = 1 << VIRTIO_GPU_F_EDID;

        // If a non-2D component is specified, enable 3D features. It is possible to run display
        // contexts without 3D backend (i.e, gfxstream / virglrender), so check for that too.
        if self.rutabaga_component != RutabagaComponentType::Rutabaga2D || self.capset_mask != 0 {
            // NOTE(review): VIRTIO_GPU_F_EDID is already set unconditionally above, so
            // repeating it here is redundant (though harmless).
            virtio_gpu_features |= 1 << VIRTIO_GPU_F_VIRGL
                | 1 << VIRTIO_GPU_F_RESOURCE_UUID
                | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
                | 1 << VIRTIO_GPU_F_CONTEXT_INIT
                | 1 << VIRTIO_GPU_F_EDID
                | 1 << VIRTIO_GPU_F_RESOURCE_SYNC;

            if self.udmabuf {
                virtio_gpu_features |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
            }
        }

        self.base_features | virtio_gpu_features
    }

    // Feature negotiation result is not needed by this device; intentionally ignored.
    fn ack_features(&mut self, value: u64) {
        let _ = value;
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        copy_config(data, 0, self.get_config().as_bytes(), offset);
    }

    // The only writable config field acted upon is events_clear: writing the display
    // event bit acknowledges (clears) a pending display event.
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let mut cfg = self.get_config();
        copy_config(cfg.as_bytes_mut(), offset, data, 0);
        if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
            self.display_event.store(false, Ordering::Relaxed);
        }
    }

    // Moves the device's stored resources into a newly spawned worker thread that owns
    // the control/cursor queues and runs the GPU event loop until killed.
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: Vec<(Queue, Event)>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

        let exit_evt_wrtube = self
            .exit_evt_wrtube
            .try_clone()
            .context("error cloning exit tube")?;

        // These Option fields may only be taken once; a second activate() fails here.
        #[cfg(unix)]
        let gpu_control_tube = self
            .gpu_control_tube
            .take()
            .context("gpu_control_tube is none")?;

        let resource_bridges = self
            .resource_bridges
            .take()
            .context("resource_bridges is none")?;

        // Queue 0 is the control queue, queue 1 the cursor queue.
        let (ctrl_queue, ctrl_evt) = queues.remove(0);
        let ctrl_queue = SharedQueueReader::new(ctrl_queue, interrupt.clone());
        let (cursor_queue, cursor_evt) = queues.remove(0);
        let cursor_queue = LocalQueueReader::new(cursor_queue, interrupt.clone());
        let display_backends = self.display_backends.clone();
        let display_params = self.display_params.clone();
        let display_event = self.display_event.clone();
        // split_off(0) drains the vector, leaving self.event_devices empty.
        let event_devices = self.event_devices.split_off(0);
        let external_blob = self.external_blob;
        let udmabuf = self.udmabuf;
        let fence_state = Arc::new(Mutex::new(Default::default()));
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.take();

        #[cfg(windows)]
        let mut wndproc_thread = self.wndproc_thread.take();

        #[cfg(unix)]
        let gpu_cgroup_path = self.gpu_cgroup_path.clone();

        let mapper = self.mapper.take().context("missing mapper")?;
        let rutabaga_builder = self
            .rutabaga_builder
            .take()
            .context("missing rutabaga_builder")?;

        self.worker_thread = Some(WorkerThread::start("v_gpu", move |kill_evt| {
            // Constrain the worker thread to the requested cgroup before doing any work.
            #[cfg(unix)]
            if let Some(cgroup_path) = gpu_cgroup_path {
                move_task_to_cgroup(cgroup_path, base::gettid())
                    .expect("Failed to move v_gpu into requested cgroup");
            }

            let fence_handler =
                create_fence_handler(mem.clone(), ctrl_queue.clone(), fence_state.clone());

            // Building the backend can fail (e.g. no display available); in that case
            // the worker exits immediately without running the event loop.
            let virtio_gpu = match build(
                &display_backends,
                display_params,
                display_event,
                rutabaga_builder,
                event_devices,
                mapper,
                external_blob,
                #[cfg(windows)]
                &mut wndproc_thread,
                udmabuf,
                fence_handler,
                rutabaga_server_descriptor,
            ) {
                Some(backend) => backend,
                None => return,
            };

            Worker {
                interrupt,
                exit_evt_wrtube,
                #[cfg(unix)]
                gpu_control_tube,
                mem,
                ctrl_queue: ctrl_queue.clone(),
                ctrl_evt,
                cursor_queue,
                cursor_evt,
                resource_bridges,
                kill_evt,
                state: Frontend::new(virtio_gpu, fence_state),
            }
            .run()
        }));

        Ok(())
    }

    // Exposes the host-visible BAR as a virtio shared memory region.
    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
            length: self.pci_bar_size,
        })
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper = Some(mapper);
    }

    fn expose_shmem_descriptors_with_viommu(&self) -> bool {
        true
    }
}
1463
// Relies entirely on the trait's default method implementations; the GPU device
// defines no custom suspend/resume behavior here.
impl Suspendable for Gpu {}
1465
/// This struct takes the ownership of resource bridges and tracks which ones should be processed.
struct ResourceBridges {
    // The owned resource bridge tubes.
    resource_bridges: Vec<Tube>,
    // Parallel to `resource_bridges`: whether each bridge should be processed on the
    // next call to `process_resource_bridges()`. Kept the same length as the bridge
    // list by `reset_should_process()`.
    should_process: Vec<bool>,
}
1471
1472 impl ResourceBridges {
new(resource_bridges: Vec<Tube>) -> Self1473 pub fn new(resource_bridges: Vec<Tube>) -> Self {
1474 #[cfg(windows)]
1475 assert!(
1476 resource_bridges.is_empty(),
1477 "resource bridges are not supported on Windows"
1478 );
1479
1480 let mut resource_bridges = Self {
1481 resource_bridges,
1482 should_process: Default::default(),
1483 };
1484 resource_bridges.reset_should_process();
1485 resource_bridges
1486 }
1487
1488 // Appends raw descriptors of all resource bridges to the given vector.
append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>)1489 pub fn append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>) {
1490 for bridge in &self.resource_bridges {
1491 rds.push(bridge.as_raw_descriptor());
1492 }
1493 }
1494
1495 /// Adds all resource bridges to WaitContext.
add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>)1496 pub fn add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>) {
1497 for (index, bridge) in self.resource_bridges.iter().enumerate() {
1498 if let Err(e) = wait_ctx.add(bridge, WorkerToken::ResourceBridge { index }) {
1499 error!("failed to add resource bridge to WaitContext: {}", e);
1500 }
1501 }
1502 }
1503
1504 /// Marks that the resource bridge at the given index should be processed when
1505 /// `process_resource_bridges()` is called.
set_should_process(&mut self, index: usize)1506 pub fn set_should_process(&mut self, index: usize) {
1507 self.should_process[index] = true;
1508 }
1509
1510 /// Processes all resource bridges that have been marked as should be processed. The markings
1511 /// will be cleared before returning. Faulty resource bridges will be removed from WaitContext.
process_resource_bridges( &mut self, state: &mut Frontend, wait_ctx: &mut WaitContext<WorkerToken>, )1512 pub fn process_resource_bridges(
1513 &mut self,
1514 state: &mut Frontend,
1515 wait_ctx: &mut WaitContext<WorkerToken>,
1516 ) {
1517 for (bridge, &should_process) in self.resource_bridges.iter().zip(&self.should_process) {
1518 if should_process {
1519 if let Err(e) = state.process_resource_bridge(bridge) {
1520 error!("Failed to process resource bridge: {:#}", e);
1521 error!("Removing that resource bridge from the wait context.");
1522 wait_ctx.delete(bridge).unwrap_or_else(|e| {
1523 error!("Failed to remove faulty resource bridge: {:#}", e)
1524 });
1525 }
1526 }
1527 }
1528 self.reset_should_process();
1529 }
1530
reset_should_process(&mut self)1531 fn reset_should_process(&mut self) {
1532 self.should_process.clear();
1533 self.should_process
1534 .resize(self.resource_bridges.len(), false);
1535 }
1536 }
1537
/// This function creates the window procedure thread and windows.
///
/// We have seen third-party DLLs hooking into window creation. They may have deep call stack, and
/// they may not be well tested against late window creation, which may lead to stack overflow.
/// Hence, this should be called as early as possible when the VM is booting.
///
/// # Errors
///
/// Propagates any error from `WindowProcedureThread::start_thread`.
#[cfg(windows)]
#[inline]
pub fn start_wndproc_thread(
    vm_tube: Option<Arc<Mutex<Tube>>>,
) -> anyhow::Result<WindowProcedureThread> {
    WindowProcedureThread::start_thread(vm_tube)
}
1550