1 // Copyright 2018 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 mod edid;
6 mod parameters;
7 mod protocol;
8 mod snapshot;
9 mod virtio_gpu;
10
11 use std::cell::RefCell;
12 use std::collections::BTreeMap;
13 use std::io::Read;
14 use std::path::PathBuf;
15 use std::rc::Rc;
16 use std::sync::atomic::AtomicBool;
17 use std::sync::atomic::Ordering;
18 use std::sync::mpsc;
19 use std::sync::Arc;
20
21 use ::snapshot::AnySnapshot;
22 use anyhow::anyhow;
23 use anyhow::Context;
24 use base::custom_serde::deserialize_map_from_kv_vec;
25 use base::custom_serde::serialize_map_as_kv_vec;
26 use base::debug;
27 use base::error;
28 use base::info;
29 #[cfg(any(target_os = "android", target_os = "linux"))]
30 use base::linux::move_task_to_cgroup;
31 use base::warn;
32 use base::AsRawDescriptor;
33 use base::Event;
34 use base::EventToken;
35 use base::RawDescriptor;
36 use base::ReadNotifier;
37 #[cfg(windows)]
38 use base::RecvTube;
39 use base::Result;
40 use base::SafeDescriptor;
41 use base::SendTube;
42 use base::Tube;
43 use base::VmEventType;
44 use base::WaitContext;
45 use base::WorkerThread;
46 use data_model::*;
47 pub use gpu_display::EventDevice;
48 use gpu_display::*;
49 use hypervisor::MemCacheType;
50 pub use parameters::AudioDeviceMode;
51 pub use parameters::GpuParameters;
52 use rutabaga_gfx::*;
53 use serde::Deserialize;
54 use serde::Serialize;
55 use sync::Mutex;
56 pub use vm_control::gpu::DisplayMode as GpuDisplayMode;
57 pub use vm_control::gpu::DisplayParameters as GpuDisplayParameters;
58 use vm_control::gpu::GpuControlCommand;
59 use vm_control::gpu::GpuControlResult;
60 pub use vm_control::gpu::MouseMode as GpuMouseMode;
61 pub use vm_control::gpu::DEFAULT_DISPLAY_HEIGHT;
62 pub use vm_control::gpu::DEFAULT_DISPLAY_WIDTH;
63 pub use vm_control::gpu::DEFAULT_REFRESH_RATE;
64 #[cfg(windows)]
65 use vm_control::ModifyWaitContext;
66 use vm_memory::GuestAddress;
67 use vm_memory::GuestMemory;
68 use zerocopy::IntoBytes;
69
70 pub use self::protocol::virtio_gpu_config;
71 pub use self::protocol::VIRTIO_GPU_F_CONTEXT_INIT;
72 pub use self::protocol::VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
73 pub use self::protocol::VIRTIO_GPU_F_EDID;
74 pub use self::protocol::VIRTIO_GPU_F_FENCE_PASSING;
75 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
76 pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
77 pub use self::protocol::VIRTIO_GPU_F_VIRGL;
78 pub use self::protocol::VIRTIO_GPU_MAX_SCANOUTS;
79 pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
80 use self::protocol::*;
81 use self::virtio_gpu::to_rutabaga_descriptor;
82 pub use self::virtio_gpu::ProcessDisplayResult;
83 use self::virtio_gpu::VirtioGpu;
84 use self::virtio_gpu::VirtioGpuSnapshot;
85 use super::copy_config;
86 use super::resource_bridge::ResourceRequest;
87 use super::DescriptorChain;
88 use super::DeviceType;
89 use super::Interrupt;
90 use super::Queue;
91 use super::Reader;
92 use super::SharedMemoryMapper;
93 use super::SharedMemoryPrepareType;
94 use super::SharedMemoryRegion;
95 use super::VirtioDevice;
96 use super::Writer;
97 use crate::PciAddress;
98
// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of. Index 0 is the control queue, index 1 the cursor queue.
const QUEUE_SIZES: &[u16] = &[512, 16];
102
/// Selects which virtio-gpu rendering backend the device uses.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMode {
    /// 2D-only display support (no 3D acceleration).
    #[serde(rename = "2d", alias = "2D")]
    Mode2D,
    /// 3D acceleration backed by virglrenderer.
    #[cfg(feature = "virgl_renderer")]
    #[serde(rename = "virglrenderer", alias = "3d", alias = "3D")]
    ModeVirglRenderer,
    /// 3D acceleration backed by gfxstream.
    #[cfg(feature = "gfxstream")]
    #[serde(rename = "gfxstream")]
    ModeGfxstream,
}
114
impl Default for GpuMode {
    /// Picks the preferred mode for the build configuration: gfxstream on Windows,
    /// virglrenderer on unix, otherwise falling back to 2D-only.
    fn default() -> Self {
        #[cfg(all(windows, feature = "gfxstream"))]
        return GpuMode::ModeGfxstream;

        #[cfg(all(unix, feature = "virgl_renderer"))]
        return GpuMode::ModeVirglRenderer;

        // Fallback when neither platform/feature pair above applies.
        #[cfg(not(any(
            all(windows, feature = "gfxstream"),
            all(unix, feature = "virgl_renderer"),
        )))]
        return GpuMode::Mode2D;
    }
}
130
/// Window system integration flavor for the GPU device.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GpuWsi {
    /// Vulkan-based window system integration.
    #[serde(alias = "vk")]
    Vulkan,
}
137
/// Layout description for a blob resource used as a scanout (see `SetScanoutBlob`).
#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    pub drm_format: DrmFormat,
    // Per-plane strides/offsets; entries beyond the format's plane count stay 0.
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}
146
/// Identifies which fence timeline a fence belongs to: the single global ring, or a
/// per-context ring selected by (`ctx_id`, `ring_idx`).
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
enum VirtioGpuRing {
    Global,
    ContextSpecific { ctx_id: u32, ring_idx: u8 },
}
152
/// A descriptor chain whose response is parked until its fence completes.
struct FenceDescriptor {
    // Ring the fence was created on.
    ring: VirtioGpuRing,
    fence_id: u64,
    // The chain to return to the guest once `fence_id` completes on `ring`.
    desc_chain: DescriptorChain,
    // Number of response bytes already encoded into the chain's writer.
    len: u32,
}
159
/// Fence bookkeeping shared between the command processor and the fence handler.
#[derive(Default)]
pub struct FenceState {
    // Descriptor chains waiting on not-yet-completed fences.
    descs: Vec<FenceDescriptor>,
    // Highest completed fence id observed per ring.
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
165
/// Serializable subset of `FenceState` saved across snapshot/restore.
#[derive(Serialize, Deserialize)]
struct FenceStateSnapshot {
    // Customize serialization to avoid errors when trying to use objects as keys in JSON
    // dictionaries.
    #[serde(
        serialize_with = "serialize_map_as_kv_vec",
        deserialize_with = "deserialize_map_from_kv_vec"
    )]
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}
176
177 impl FenceState {
snapshot(&self) -> FenceStateSnapshot178 fn snapshot(&self) -> FenceStateSnapshot {
179 assert!(self.descs.is_empty(), "can't snapshot with pending fences");
180 FenceStateSnapshot {
181 completed_fences: self.completed_fences.clone(),
182 }
183 }
184
restore(&mut self, snapshot: FenceStateSnapshot)185 fn restore(&mut self, snapshot: FenceStateSnapshot) {
186 assert!(self.descs.is_empty(), "can't restore activated device");
187 self.completed_fences = snapshot.completed_fences;
188 }
189 }
190
/// Abstraction over a virtio queue so processing code can work with either a
/// thread-local queue or one shared across threads.
pub trait QueueReader {
    /// Pops the next available descriptor chain, if any.
    fn pop(&self) -> Option<DescriptorChain>;
    /// Returns a finished chain to the used ring with `len` bytes written.
    fn add_used(&self, desc_chain: DescriptorChain, len: u32);
    /// Signals the guest that used descriptors are available.
    fn signal_used(&self);
}
196
/// Single-threaded `QueueReader`; interior mutability via `RefCell`.
struct LocalQueueReader {
    queue: RefCell<Queue>,
}
200
201 impl LocalQueueReader {
new(queue: Queue) -> Self202 fn new(queue: Queue) -> Self {
203 Self {
204 queue: RefCell::new(queue),
205 }
206 }
207 }
208
209 impl QueueReader for LocalQueueReader {
pop(&self) -> Option<DescriptorChain>210 fn pop(&self) -> Option<DescriptorChain> {
211 self.queue.borrow_mut().pop()
212 }
213
add_used(&self, desc_chain: DescriptorChain, len: u32)214 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
215 self.queue.borrow_mut().add_used(desc_chain, len)
216 }
217
signal_used(&self)218 fn signal_used(&self) {
219 self.queue.borrow_mut().trigger_interrupt();
220 }
221 }
222
/// Thread-safe `QueueReader`; the queue is shared behind an `Arc<Mutex<..>>` so
/// clones refer to the same underlying queue.
#[derive(Clone)]
struct SharedQueueReader {
    queue: Arc<Mutex<Queue>>,
}
227
228 impl SharedQueueReader {
new(queue: Queue) -> Self229 fn new(queue: Queue) -> Self {
230 Self {
231 queue: Arc::new(Mutex::new(queue)),
232 }
233 }
234 }
235
236 impl QueueReader for SharedQueueReader {
pop(&self) -> Option<DescriptorChain>237 fn pop(&self) -> Option<DescriptorChain> {
238 self.queue.lock().pop()
239 }
240
add_used(&self, desc_chain: DescriptorChain, len: u32)241 fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
242 self.queue.lock().add_used(desc_chain, len)
243 }
244
signal_used(&self)245 fn signal_used(&self) {
246 self.queue.lock().trigger_interrupt();
247 }
248 }
249
250 /// Initializes the virtio_gpu state tracker.
build( display_backends: &[DisplayBackend], display_params: Vec<GpuDisplayParameters>, display_event: Arc<AtomicBool>, rutabaga: Rutabaga, mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>, external_blob: bool, fixed_blob_mapping: bool, #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>, udmabuf: bool, #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube, snapshot_scratch_directory: Option<PathBuf>, ) -> Option<VirtioGpu>251 fn build(
252 display_backends: &[DisplayBackend],
253 display_params: Vec<GpuDisplayParameters>,
254 display_event: Arc<AtomicBool>,
255 rutabaga: Rutabaga,
256 mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
257 external_blob: bool,
258 fixed_blob_mapping: bool,
259 #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
260 udmabuf: bool,
261 #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
262 snapshot_scratch_directory: Option<PathBuf>,
263 ) -> Option<VirtioGpu> {
264 let mut display_opt = None;
265 for display_backend in display_backends {
266 match display_backend.build(
267 #[cfg(windows)]
268 wndproc_thread,
269 #[cfg(windows)]
270 gpu_display_wait_descriptor_ctrl_wr
271 .try_clone()
272 .expect("failed to clone wait context ctrl channel"),
273 ) {
274 Ok(c) => {
275 display_opt = Some(c);
276 break;
277 }
278 Err(e) => error!("failed to open display: {}", e),
279 };
280 }
281
282 let display = match display_opt {
283 Some(d) => d,
284 None => {
285 error!("failed to open any displays");
286 return None;
287 }
288 };
289
290 VirtioGpu::new(
291 display,
292 display_params,
293 display_event,
294 rutabaga,
295 mapper,
296 external_blob,
297 fixed_blob_mapping,
298 udmabuf,
299 snapshot_scratch_directory,
300 )
301 }
302
/// Resources used by the fence handler.
pub struct FenceHandlerActivationResources<Q>
where
    Q: QueueReader + Send + Clone + 'static,
{
    /// Guest memory associated with the active device.
    pub mem: GuestMemory,
    /// Control queue to which completed fence descriptors are returned.
    pub ctrl_queue: Q,
}
311
312 /// Create a handler that writes into the completed fence queue
create_fence_handler<Q>( fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<Q>>>>, fence_state: Arc<Mutex<FenceState>>, ) -> RutabagaFenceHandler where Q: QueueReader + Send + Clone + 'static,313 pub fn create_fence_handler<Q>(
314 fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<Q>>>>,
315 fence_state: Arc<Mutex<FenceState>>,
316 ) -> RutabagaFenceHandler
317 where
318 Q: QueueReader + Send + Clone + 'static,
319 {
320 RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
321 let mut signal = false;
322
323 if let Some(ref fence_handler_resources) = *fence_handler_resources.lock() {
324 // Limits the lifetime of `fence_state`:
325 {
326 let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
327 0 => VirtioGpuRing::Global,
328 _ => VirtioGpuRing::ContextSpecific {
329 ctx_id: completed_fence.ctx_id,
330 ring_idx: completed_fence.ring_idx,
331 },
332 };
333
334 let mut fence_state = fence_state.lock();
335 // TODO(dverkamp): use `drain_filter()` when it is stabilized
336 let mut i = 0;
337 while i < fence_state.descs.len() {
338 if fence_state.descs[i].ring == ring
339 && fence_state.descs[i].fence_id <= completed_fence.fence_id
340 {
341 let completed_desc = fence_state.descs.remove(i);
342 fence_handler_resources
343 .ctrl_queue
344 .add_used(completed_desc.desc_chain, completed_desc.len);
345 signal = true;
346 } else {
347 i += 1;
348 }
349 }
350
351 // Update the last completed fence for this context
352 fence_state
353 .completed_fences
354 .insert(ring, completed_fence.fence_id);
355 }
356
357 if signal {
358 fence_handler_resources.ctrl_queue.signal_used();
359 }
360 }
361 })
362 }
363
/// A processed descriptor chain ready to be returned to the guest, along with the
/// number of response bytes written.
pub struct ReturnDescriptor {
    pub desc_chain: DescriptorChain,
    pub len: u32,
}
368
/// Decodes and executes guest GPU commands against the `VirtioGpu` backend and
/// tracks fence completion state.
pub struct Frontend {
    // Shared with the rutabaga fence handler.
    fence_state: Arc<Mutex<FenceState>>,
    virtio_gpu: VirtioGpu,
}
373
374 impl Frontend {
new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend375 fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
376 Frontend {
377 fence_state,
378 virtio_gpu,
379 }
380 }
381
    /// Returns the internal connection to the compositor and its associated state.
    pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
        self.virtio_gpu.display()
    }
386
    /// Processes the internal `display` events and returns `true` if any display was closed.
    pub fn process_display(&mut self) -> ProcessDisplayResult {
        self.virtio_gpu.process_display()
    }
391
392 /// Processes incoming requests on `resource_bridge`.
process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()>393 pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
394 let response = match resource_bridge.recv() {
395 Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
396 Ok(ResourceRequest::GetFence { seqno }) => self.virtio_gpu.export_fence(seqno),
397 Err(e) => return Err(e).context("Error receiving resource bridge request"),
398 };
399
400 resource_bridge
401 .send(&response)
402 .context("Error sending resource bridge response")?;
403
404 Ok(())
405 }
406
    /// Processes a GPU control command and returns the result to report back to the
    /// requester. (Whether the device config needs updating is conveyed through the
    /// returned `GpuControlResult`, not a separate flag.)
    pub fn process_gpu_control_command(&mut self, cmd: GpuControlCommand) -> GpuControlResult {
        self.virtio_gpu.process_gpu_control_command(cmd)
    }
412
    /// Decodes and executes a single GPU command, returning the response to encode
    /// back to the guest. Commands that carry trailing data (backing entries, fence
    /// ids, command buffers) read it from `reader`.
    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        reader: &mut Reader,
    ) -> VirtioGpuResult {
        self.virtio_gpu.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
                self.virtio_gpu.display_info().to_vec(),
            )),
            // 2D resources are implemented as single-level 2D render-target textures.
            GpuCommand::ResourceCreate2d(info) => {
                let resource_id = info.resource_id.to_native();

                let resource_create_3d = ResourceCreate3D {
                    target: RUTABAGA_PIPE_TEXTURE_2D,
                    format: info.format.to_native(),
                    bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: 1,
                    array_size: 1,
                    last_level: 0,
                    nr_samples: 0,
                    flags: 0,
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::ResourceUnref(info) => {
                self.virtio_gpu.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
                info.r,
                info.scanout_id.to_native(),
                info.resource_id.to_native(),
                None,
            ),
            GpuCommand::ResourceFlush(info) => {
                self.virtio_gpu.flush_resource(info.resource_id.to_native())
            }
            // 2D transfers always use context 0.
            GpuCommand::TransferToHost2d(info) => {
                let resource_id = info.resource_id.to_native();
                let transfer = Transfer3D::new_2d(
                    info.r.x.to_native(),
                    info.r.y.to_native(),
                    info.r.width.to_native(),
                    info.r.height.to_native(),
                    info.offset.to_native(),
                );
                self.virtio_gpu.transfer_write(0, resource_id, transfer)
            }
            GpuCommand::ResourceAttachBacking(info) => {
                // The (addr, length) entries follow the request header in the chain.
                let available_bytes = reader.available_bytes();
                if available_bytes != 0 {
                    let entry_count = info.nr_entries.to_native() as usize;
                    let mut vecs = Vec::with_capacity(entry_count);
                    for _ in 0..entry_count {
                        match reader.read_obj::<virtio_gpu_mem_entry>() {
                            Ok(entry) => {
                                let addr = GuestAddress(entry.addr.to_native());
                                let len = entry.length.to_native() as usize;
                                vecs.push((addr, len))
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }
                    self.virtio_gpu
                        .attach_backing(info.resource_id.to_native(), mem, vecs)
                } else {
                    error!("missing data for command {:?}", cmd);
                    Err(GpuResponse::ErrUnspec)
                }
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.virtio_gpu.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
                info.resource_id.to_native(),
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::ResourceAssignUuid(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_assign_uuid(resource_id)
            }
            GpuCommand::GetCapsetInfo(info) => self
                .virtio_gpu
                .get_capset_info(info.capset_index.to_native()),
            GpuCommand::GetCapset(info) => self
                .virtio_gpu
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => {
                // The debug name is best-effort; invalid UTF-8 simply yields no name.
                let context_name: Option<String> = String::from_utf8(info.debug_name.to_vec()).ok();
                self.virtio_gpu.create_context(
                    info.hdr.ctx_id.to_native(),
                    info.context_init.to_native(),
                    context_name.as_deref(),
                )
            }
            GpuCommand::CtxDestroy(info) => {
                self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
            }
            GpuCommand::CtxAttachResource(info) => self
                .virtio_gpu
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .virtio_gpu
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                let resource_id = info.resource_id.to_native();
                let resource_create_3d = ResourceCreate3D {
                    target: info.target.to_native(),
                    format: info.format.to_native(),
                    bind: info.bind.to_native(),
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: info.depth.to_native(),
                    array_size: info.array_size.to_native(),
                    last_level: info.last_level.to_native(),
                    nr_samples: info.nr_samples.to_native(),
                    flags: info.flags.to_native(),
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_write(ctx_id, resource_id, transfer)
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_read(ctx_id, resource_id, transfer, None)
            }
            GpuCommand::CmdSubmit3d(info) => {
                if reader.available_bytes() != 0 {
                    let num_in_fences = info.num_in_fences.to_native() as usize;
                    let cmd_size = info.size.to_native() as usize;
                    let mut cmd_buf = vec![0; cmd_size];
                    let mut fence_ids: Vec<u64> = Vec::with_capacity(num_in_fences);
                    let ctx_id = info.hdr.ctx_id.to_native();

                    // In-fence ids precede the command buffer in the request payload.
                    for _ in 0..num_in_fences {
                        match reader.read_obj::<Le64>() {
                            Ok(fence_id) => {
                                fence_ids.push(fence_id.to_native());
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }

                    if reader.read_exact(&mut cmd_buf[..]).is_ok() {
                        self.virtio_gpu
                            .submit_command(ctx_id, &mut cmd_buf[..], &fence_ids[..])
                    } else {
                        Err(GpuResponse::ErrInvalidParameter)
                    }
                } else {
                    // Silently accept empty command buffers to allow for
                    // benchmarking.
                    Ok(GpuResponse::OkNoData)
                }
            }
            GpuCommand::ResourceCreateBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let ctx_id = info.hdr.ctx_id.to_native();

                let resource_create_blob = ResourceCreateBlob {
                    blob_mem: info.blob_mem.to_native(),
                    blob_flags: info.blob_flags.to_native(),
                    blob_id: info.blob_id.to_native(),
                    size: info.size.to_native(),
                };

                // Guest-backed blobs must supply their memory entries in the chain.
                let entry_count = info.nr_entries.to_native();
                if reader.available_bytes() == 0 && entry_count > 0 {
                    return Err(GpuResponse::ErrUnspec);
                }

                let mut vecs = Vec::with_capacity(entry_count as usize);
                for _ in 0..entry_count {
                    match reader.read_obj::<virtio_gpu_mem_entry>() {
                        Ok(entry) => {
                            let addr = GuestAddress(entry.addr.to_native());
                            let len = entry.length.to_native() as usize;
                            vecs.push((addr, len))
                        }
                        Err(_) => return Err(GpuResponse::ErrUnspec),
                    }
                }

                self.virtio_gpu.resource_create_blob(
                    ctx_id,
                    resource_id,
                    resource_create_blob,
                    vecs,
                    mem,
                )
            }
            GpuCommand::SetScanoutBlob(info) => {
                let scanout_id = info.scanout_id.to_native();
                let resource_id = info.resource_id.to_native();
                let virtio_gpu_format = info.format.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let mut strides: [u32; 4] = [0; 4];
                let mut offsets: [u32; 4] = [0; 4];

                // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
                // the following may have to change too.
                let drm_format = match virtio_gpu_format {
                    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
                    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
                    _ => {
                        error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
                        return Err(GpuResponse::ErrUnspec);
                    }
                };

                for plane_index in 0..PLANE_INFO_MAX_COUNT {
                    offsets[plane_index] = info.offsets[plane_index].to_native();
                    strides[plane_index] = info.strides[plane_index].to_native();
                }

                let scanout = VirtioScanoutBlobData {
                    width,
                    height,
                    drm_format,
                    strides,
                    offsets,
                };

                self.virtio_gpu
                    .set_scanout(info.r, scanout_id, resource_id, Some(scanout))
            }
            GpuCommand::ResourceMapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let offset = info.offset.to_native();
                self.virtio_gpu.resource_map_blob(resource_id, offset)
            }
            GpuCommand::ResourceUnmapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_unmap_blob(resource_id)
            }
            GpuCommand::GetEdid(info) => self.virtio_gpu.get_edid(info.scanout.to_native()),
        }
    }
701
702 /// Processes virtio messages on `queue`.
process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool703 pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
704 let mut signal_used = false;
705 while let Some(desc) = queue.pop() {
706 if let Some(ret_desc) = self.process_descriptor(mem, desc) {
707 queue.add_used(ret_desc.desc_chain, ret_desc.len);
708 signal_used = true;
709 }
710 }
711
712 signal_used
713 }
714
    /// Handles one descriptor chain from a queue: decodes the command, executes it,
    /// and encodes the response into the chain's writer. Returns `None` when the
    /// response must wait for an outstanding fence (the chain is parked in
    /// `fence_state`); otherwise returns the chain ready to be given back to the guest.
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        mut desc_chain: DescriptorChain,
    ) -> Option<ReturnDescriptor> {
        let reader = &mut desc_chain.reader;
        let writer = &mut desc_chain.writer;
        let mut resp = Err(GpuResponse::ErrUnspec);
        let mut gpu_cmd = None;
        let mut len = 0;
        match GpuCommand::decode(reader) {
            Ok(cmd) => {
                resp = self.process_gpu_command(mem, cmd, reader);
                gpu_cmd = Some(cmd);
            }
            Err(e) => debug!("descriptor decode error: {}", e),
        }

        let mut gpu_response = match resp {
            Ok(gpu_response) => gpu_response,
            Err(gpu_response) => {
                if let Some(gpu_cmd) = gpu_cmd {
                    error!(
                        "error processing gpu command {:?}: {:?}",
                        gpu_cmd, gpu_response
                    );
                }
                gpu_response
            }
        };

        if writer.available_bytes() != 0 {
            let mut fence_id = 0;
            let mut ctx_id = 0;
            let mut flags = 0;
            let mut ring_idx = 0;
            if let Some(cmd) = gpu_cmd {
                // If the guest requested a fence, create it now so completion can be
                // tracked against this response.
                let ctrl_hdr = cmd.ctrl_hdr();
                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                    flags = ctrl_hdr.flags.to_native();
                    fence_id = ctrl_hdr.fence_id.to_native();
                    ctx_id = ctrl_hdr.ctx_id.to_native();
                    ring_idx = ctrl_hdr.ring_idx;

                    let fence = RutabagaFence {
                        flags,
                        fence_id,
                        ctx_id,
                        ring_idx,
                    };
                    gpu_response = match self.virtio_gpu.create_fence(fence) {
                        Ok(_) => gpu_response,
                        Err(fence_resp) => {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            fence_resp
                        }
                    };
                }
            }

            // Prepare the response now, even if it is going to wait until
            // fence is complete.
            match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
                Ok(l) => len = l,
                Err(e) => debug!("ctrl queue response encode error: {}", e),
            }

            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
                };

                // In case the fence is signaled immediately after creation, don't add a return
                // FenceDescriptor.
                let mut fence_state = self.fence_state.lock();
                if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
                    fence_state.descs.push(FenceDescriptor {
                        ring,
                        fence_id,
                        desc_chain,
                        len,
                    });

                    return None;
                }
            }

            // No fence (or already completed fence), respond now.
        }
        Some(ReturnDescriptor { desc_chain, len })
    }
807
    /// Forwards an event-poll request to the underlying `VirtioGpu` backend.
    pub fn event_poll(&self) {
        self.virtio_gpu.event_poll();
    }
811 }
812
/// Wait-context tokens identifying which event source woke the worker loop.
#[derive(EventToken, PartialEq, Eq, Clone, Copy, Debug)]
enum WorkerToken {
    CtrlQueue,
    CursorQueue,
    Display,
    GpuControl,
    Sleep,
    Kill,
    // One token per registered resource bridge tube.
    ResourceBridge {
        index: usize,
    },
    VirtioGpuPoll,
    #[cfg(windows)]
    DisplayDescriptorRequest,
}
828
/// Wraps a `WaitContext` plus a parallel list of registered (descriptor, token)
/// pairs so entries can later be removed by token.
struct EventManager<'a> {
    pub wait_ctx: WaitContext<WorkerToken>,
    events: Vec<(&'a dyn AsRawDescriptor, WorkerToken)>,
}
833
834 impl<'a> EventManager<'a> {
new() -> Result<EventManager<'a>>835 pub fn new() -> Result<EventManager<'a>> {
836 Ok(EventManager {
837 wait_ctx: WaitContext::new()?,
838 events: vec![],
839 })
840 }
841
build_with( triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)], ) -> Result<EventManager<'a>>842 pub fn build_with(
843 triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)],
844 ) -> Result<EventManager<'a>> {
845 let mut manager = EventManager::new()?;
846 manager.wait_ctx.add_many(triggers)?;
847
848 for (descriptor, token) in triggers {
849 manager.events.push((*descriptor, *token));
850 }
851 Ok(manager)
852 }
853
add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()>854 pub fn add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()> {
855 self.wait_ctx.add(descriptor, token)?;
856 self.events.push((descriptor, token));
857 Ok(())
858 }
859
delete(&mut self, token: WorkerToken)860 pub fn delete(&mut self, token: WorkerToken) {
861 self.events.retain(|event| {
862 if event.1 == token {
863 self.wait_ctx.delete(event.0).ok();
864 return false;
865 }
866 true
867 });
868 }
869 }
870
/// Serializable worker state captured by `WorkerRequest::Snapshot` and replayed by
/// `WorkerRequest::Restore`.
#[derive(Serialize, Deserialize)]
struct WorkerSnapshot {
    fence_state_snapshot: FenceStateSnapshot,
    virtio_gpu_snapshot: VirtioGpuSnapshot,
}
876
/// Payload of `WorkerRequest::Activate`: everything the worker needs to start
/// servicing queues.
struct WorkerActivateRequest {
    resources: GpuActivationResources,
}
880
/// Control messages sent from the device to the worker thread.
enum WorkerRequest {
    Activate(WorkerActivateRequest),
    Suspend,
    Snapshot,
    Restore(WorkerSnapshot),
}
887
/// Replies from the worker thread, one per `WorkerRequest`.
enum WorkerResponse {
    Ok,
    // Returns the queues so the device can hand them back on reactivation.
    Suspend(GpuDeactivationResources),
    Snapshot(WorkerSnapshot),
}
893
/// Resources handed to the worker when the device is activated.
struct GpuActivationResources {
    mem: GuestMemory,
    interrupt: Interrupt,
    // Control queue is shared (also used by the fence handler); cursor queue is
    // only touched by the worker thread.
    ctrl_queue: SharedQueueReader,
    cursor_queue: LocalQueueReader,
}
900
/// Resources returned to the device when the worker suspends.
struct GpuDeactivationResources {
    queues: Option<Vec<Queue>>,
}
904
/// State owned by the GPU worker thread.
struct Worker {
    // Control requests from the device and their replies.
    request_receiver: mpsc::Receiver<WorkerRequest>,
    response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
    // NOTE(review): presumably used to signal VM exit events — confirm against
    // `run_until_sleep_or_exit()` (not visible in this chunk).
    exit_evt_wrtube: SendTube,
    gpu_control_tube: Tube,
    resource_bridges: ResourceBridges,
    suspend_evt: Event,
    kill_evt: Event,
    // Command-processing frontend built in `Worker::new`.
    state: Frontend,
    // Shared with the frontend and the rutabaga fence handler.
    fence_state: Arc<Mutex<FenceState>>,
    // Populated on activate so the fence handler can return descriptors.
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<SharedQueueReader>>>>,
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_rd: RecvTube,
    // Set while the device is active; taken back on suspend.
    activation_resources: Option<GpuActivationResources>,
}
920
/// Why the worker's active processing loop stopped.
#[derive(Copy, Clone)]
enum WorkerStopReason {
    Sleep,
    Kill,
}
926
/// Coarse lifecycle state of the worker as tracked by the device.
enum WorkerState {
    Inactive,
    Active,
    Error,
}
932
933 impl Worker {
    /// Builds the rutabaga backend and display, imports `event_devices`, and assembles
    /// a `Worker` ready to receive `WorkerRequest`s on `request_receiver`.
    fn new(
        rutabaga_builder: RutabagaBuilder,
        rutabaga_server_descriptor: Option<RutabagaDescriptor>,
        display_backends: Vec<DisplayBackend>,
        display_params: Vec<GpuDisplayParameters>,
        display_event: Arc<AtomicBool>,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
        event_devices: Vec<EventDevice>,
        external_blob: bool,
        fixed_blob_mapping: bool,
        udmabuf: bool,
        request_receiver: mpsc::Receiver<WorkerRequest>,
        response_sender: mpsc::Sender<anyhow::Result<WorkerResponse>>,
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: ResourceBridges,
        suspend_evt: Event,
        kill_evt: Event,
        #[cfg(windows)] mut wndproc_thread: Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_rd: RecvTube,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
        snapshot_scratch_directory: Option<PathBuf>,
    ) -> anyhow::Result<Worker> {
        // Fence state/resources are shared between the frontend and the fence handler;
        // the handler's queue resources are filled in later, on activation.
        let fence_state = Arc::new(Mutex::new(Default::default()));
        let fence_handler_resources = Arc::new(Mutex::new(None));
        let fence_handler =
            create_fence_handler(fence_handler_resources.clone(), fence_state.clone());
        let rutabaga = rutabaga_builder.build(fence_handler, rutabaga_server_descriptor)?;
        let mut virtio_gpu = build(
            &display_backends,
            display_params,
            display_event,
            rutabaga,
            mapper,
            external_blob,
            fixed_blob_mapping,
            #[cfg(windows)]
            &mut wndproc_thread,
            udmabuf,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            snapshot_scratch_directory,
        )
        .ok_or_else(|| anyhow!("failed to build virtio gpu"))?;

        for event_device in event_devices {
            virtio_gpu
                .import_event_device(event_device)
                // We lost the `EventDevice`, so fail hard.
                .context("failed to import event device")?;
        }

        Ok(Worker {
            request_receiver,
            response_sender,
            exit_evt_wrtube,
            gpu_control_tube,
            resource_bridges,
            suspend_evt,
            kill_evt,
            state: Frontend::new(virtio_gpu, fence_state.clone()),
            fence_state,
            fence_handler_resources,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd,
            activation_resources: None,
        })
    }
1002
    /// Worker entry point: services `WorkerRequest`s from the device until the
    /// request channel is closed or a kill is requested.
    fn run(&mut self) {
        // This loop effectively only runs while the worker is inactive. Once activated via
        // a `WorkerRequest::Activate`, the worker will remain in `run_until_sleep_or_exit()`
        // until suspended via `kill_evt` or `suspend_evt` being signaled.
        loop {
            let request = match self.request_receiver.recv() {
                Ok(r) => r,
                Err(_) => {
                    // The sender side was dropped; no more requests will arrive.
                    info!("virtio gpu worker connection ended, exiting.");
                    return;
                }
            };

            match request {
                WorkerRequest::Activate(request) => {
                    let response = self.on_activate(request).map(|_| WorkerResponse::Ok);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for activate");

                    // Blocks here while the device is active; returns on
                    // suspend (continue looping) or kill (exit).
                    let stop_reason = self
                        .run_until_sleep_or_exit()
                        .expect("failed to run gpu worker processing");

                    if let WorkerStopReason::Kill = stop_reason {
                        break;
                    }
                }
                WorkerRequest::Suspend => {
                    let response = self.on_suspend().map(WorkerResponse::Suspend);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for suspend");
                }
                WorkerRequest::Snapshot => {
                    let response = self.on_snapshot().map(WorkerResponse::Snapshot);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for snapshot");
                }
                WorkerRequest::Restore(snapshot) => {
                    let response = self.on_restore(snapshot).map(|_| WorkerResponse::Ok);
                    self.response_sender
                        .send(response)
                        .expect("failed to send gpu worker response for restore");
                }
            }
        }
    }
1052
on_activate(&mut self, request: WorkerActivateRequest) -> anyhow::Result<()>1053 fn on_activate(&mut self, request: WorkerActivateRequest) -> anyhow::Result<()> {
1054 self.fence_handler_resources
1055 .lock()
1056 .replace(FenceHandlerActivationResources {
1057 mem: request.resources.mem.clone(),
1058 ctrl_queue: request.resources.ctrl_queue.clone(),
1059 });
1060
1061 self.state
1062 .virtio_gpu
1063 .resume(&request.resources.mem)
1064 .context("gpu worker failed to activate virtio frontend")?;
1065
1066 self.activation_resources = Some(request.resources);
1067
1068 Ok(())
1069 }
1070
on_suspend(&mut self) -> anyhow::Result<GpuDeactivationResources>1071 fn on_suspend(&mut self) -> anyhow::Result<GpuDeactivationResources> {
1072 self.state
1073 .virtio_gpu
1074 .suspend()
1075 .context("failed to suspend VirtioGpu")?;
1076
1077 self.fence_handler_resources.lock().take();
1078
1079 let queues = if let Some(activation_resources) = self.activation_resources.take() {
1080 Some(vec![
1081 match Arc::try_unwrap(activation_resources.ctrl_queue.queue) {
1082 Ok(x) => x.into_inner(),
1083 Err(_) => panic!("too many refs on ctrl_queue"),
1084 },
1085 activation_resources.cursor_queue.queue.into_inner(),
1086 ])
1087 } else {
1088 None
1089 };
1090
1091 Ok(GpuDeactivationResources { queues })
1092 }
1093
on_snapshot(&mut self) -> anyhow::Result<WorkerSnapshot>1094 fn on_snapshot(&mut self) -> anyhow::Result<WorkerSnapshot> {
1095 Ok(WorkerSnapshot {
1096 fence_state_snapshot: self.fence_state.lock().snapshot(),
1097 virtio_gpu_snapshot: self
1098 .state
1099 .virtio_gpu
1100 .snapshot()
1101 .context("failed to snapshot VirtioGpu")?,
1102 })
1103 }
1104
on_restore(&mut self, snapshot: WorkerSnapshot) -> anyhow::Result<()>1105 fn on_restore(&mut self, snapshot: WorkerSnapshot) -> anyhow::Result<()> {
1106 self.fence_state
1107 .lock()
1108 .restore(snapshot.fence_state_snapshot);
1109
1110 self.state
1111 .virtio_gpu
1112 .restore(snapshot.virtio_gpu_snapshot)
1113 .context("failed to restore VirtioGpu")?;
1114
1115 Ok(())
1116 }
1117
    /// Main active-state event loop.
    ///
    /// Waits on the control/cursor queue events, display, GPU control tube,
    /// resource bridges, and (if present) the rutabaga poll descriptor, and
    /// dispatches work until either `suspend_evt` fires (returns
    /// `WorkerStopReason::Sleep`) or `kill_evt` fires (returns
    /// `WorkerStopReason::Kill`). Must only be called after `on_activate` has
    /// populated `activation_resources`.
    fn run_until_sleep_or_exit(&mut self) -> anyhow::Result<WorkerStopReason> {
        let activation_resources = self
            .activation_resources
            .as_ref()
            .context("virtio gpu worker missing activation resources")?;

        let display_desc =
            SafeDescriptor::try_from(&*self.state.display().borrow() as &dyn AsRawDescriptor)
                .context("failed getting event descriptor for display")?;

        let ctrl_evt = activation_resources
            .ctrl_queue
            .queue
            .lock()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;
        let cursor_evt = activation_resources
            .cursor_queue
            .queue
            .borrow()
            .event()
            .try_clone()
            .context("failed to clone queue event")?;

        let mut event_manager = EventManager::build_with(&[
            (&ctrl_evt, WorkerToken::CtrlQueue),
            (&cursor_evt, WorkerToken::CursorQueue),
            (&display_desc, WorkerToken::Display),
            (
                self.gpu_control_tube.get_read_notifier(),
                WorkerToken::GpuControl,
            ),
            (&self.suspend_evt, WorkerToken::Sleep),
            (&self.kill_evt, WorkerToken::Kill),
            #[cfg(windows)]
            (
                self.gpu_display_wait_descriptor_ctrl_rd.get_read_notifier(),
                WorkerToken::DisplayDescriptorRequest,
            ),
        ])
        .context("failed creating gpu worker WaitContext")?;

        // Bound to an outer local so the descriptor outlives the wait context
        // registration below.
        let poll_desc: SafeDescriptor;
        if let Some(desc) = self.state.virtio_gpu.poll_descriptor() {
            poll_desc = desc;
            event_manager
                .add(&poll_desc, WorkerToken::VirtioGpuPoll)
                .context("failed adding poll event to WaitContext")?;
        }

        self.resource_bridges
            .add_to_wait_context(&mut event_manager.wait_ctx);

        // TODO(davidriley): The entire main loop processing is somewhat racey and incorrect with
        // respect to cursor vs control queue processing. As both currently and originally
        // written, while the control queue is only processed/read from after the cursor queue
        // is finished, the entire queue will be processed at that time. The end effect of this
        // raciness is that control queue descriptors that are issued after cursor descriptors
        // might be handled first instead of the other way around. In practice, the cursor queue
        // isn't used so this isn't a huge issue.

        loop {
            let events = event_manager
                .wait_ctx
                .wait()
                .context("failed polling for gpu worker events")?;

            // Flags for work that is deferred until all of this wake-up's
            // events have been drained.
            let mut signal_used_cursor = false;
            let mut signal_used_ctrl = false;
            let mut ctrl_available = false;
            let mut display_available = false;
            let mut needs_config_interrupt = false;

            // Remove event triggers that have been hung-up to prevent unnecessary worker wake-ups
            // (see b/244486346#comment62 for context).
            for event in events.iter().filter(|e| e.is_hungup) {
                error!(
                    "unhandled virtio-gpu worker event hang-up detected: {:?}",
                    event.token
                );
                event_manager.delete(event.token);
            }

            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    WorkerToken::CtrlQueue => {
                        let _ = ctrl_evt.wait();
                        // Set flag that control queue is available to be read, but defer reading
                        // until rest of the events are processed.
                        ctrl_available = true;
                    }
                    WorkerToken::CursorQueue => {
                        let _ = cursor_evt.wait();
                        if self.state.process_queue(
                            &activation_resources.mem,
                            &activation_resources.cursor_queue,
                        ) {
                            signal_used_cursor = true;
                        }
                    }
                    WorkerToken::Display => {
                        // We only need to process_display once-per-wake, regardless of how many
                        // WorkerToken::Display events are received.
                        display_available = true;
                    }
                    #[cfg(windows)]
                    WorkerToken::DisplayDescriptorRequest => {
                        // The Windows display backend asks us to watch extra
                        // descriptors on its behalf (it has no epollfd).
                        if let Ok(req) = self
                            .gpu_display_wait_descriptor_ctrl_rd
                            .recv::<ModifyWaitContext>()
                        {
                            match req {
                                ModifyWaitContext::Add(desc) => {
                                    if let Err(e) =
                                        event_manager.wait_ctx.add(&desc, WorkerToken::Display)
                                    {
                                        error!(
                                            "failed to add extra descriptor from display \
                                            to GPU worker wait context: {:?}",
                                            e
                                        )
                                    }
                                }
                            }
                        } else {
                            error!("failed to receive ModifyWaitContext request.")
                        }
                    }
                    WorkerToken::GpuControl => {
                        let req = self
                            .gpu_control_tube
                            .recv()
                            .context("failed to recv from gpu control socket")?;
                        let resp = self.state.process_gpu_control_command(req);

                        // Display changes must be surfaced to the guest via a
                        // config-changed interrupt (deferred to loop bottom).
                        if let GpuControlResult::DisplaysUpdated = resp {
                            needs_config_interrupt = true;
                        }

                        self.gpu_control_tube
                            .send(&resp)
                            .context("failed to send gpu control socket response")?;
                    }
                    WorkerToken::ResourceBridge { index } => {
                        self.resource_bridges.set_should_process(index);
                    }
                    WorkerToken::VirtioGpuPoll => {
                        self.state.event_poll();
                    }
                    WorkerToken::Sleep => {
                        return Ok(WorkerStopReason::Sleep);
                    }
                    WorkerToken::Kill => {
                        return Ok(WorkerStopReason::Kill);
                    }
                }
            }

            if display_available {
                match self.state.process_display() {
                    ProcessDisplayResult::CloseRequested => {
                        // User closed the display; ask the VMM to exit.
                        let _ = self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit);
                    }
                    ProcessDisplayResult::Error(_e) => {
                        base::error!("Display processing failed, disabling display event handler.");
                        event_manager.delete(WorkerToken::Display);
                    }
                    ProcessDisplayResult::Success => (),
                };
            }

            if ctrl_available
                && self
                    .state
                    .process_queue(&activation_resources.mem, &activation_resources.ctrl_queue)
            {
                signal_used_ctrl = true;
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            // TODO(davidriley): This is still inherently racey if both the control queue request
            // and the resource bridge request come in at the same time after the control queue is
            // processed above and before the corresponding bridge is processed below.
            self.resource_bridges
                .process_resource_bridges(&mut self.state, &mut event_manager.wait_ctx);

            if signal_used_ctrl {
                activation_resources.ctrl_queue.signal_used();
            }

            if signal_used_cursor {
                activation_resources.cursor_queue.signal_used();
            }

            if needs_config_interrupt {
                activation_resources.interrupt.signal_config_changed();
            }
        }
    }
1320 }
1321
/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work.
#[derive(Clone)]
pub enum DisplayBackend {
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Use the wayland backend, connecting to the socket at the given path if one is provided.
    Wayland(Option<PathBuf>),
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Open a connection to the X server, at the given display if one is provided.
    X(Option<String>),
    /// Emulate a display without actually displaying it.
    Stub,
    #[cfg(windows)]
    /// Open a window using WinAPI.
    WinApi,
    #[cfg(feature = "android_display")]
    /// The display buffer is backed by an Android surface. The surface is set via an AIDL service
    /// that the backend hosts. Currently, the AIDL service is registered to the service manager
    /// using the name given here. The entity holding the surface is expected to locate the service
    /// via this name, and pass the surface to it.
    Android(String),
}
1346
impl DisplayBackend {
    /// Attempts to open a `GpuDisplay` using this backend.
    ///
    /// On Windows, the `WinApi` variant consumes `wndproc_thread` (a second
    /// attempt with the same `Option` will fail) and hands
    /// `gpu_display_wait_descriptor_ctrl` to the display so it can ask the GPU
    /// worker to watch additional descriptors.
    fn build(
        &self,
        #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl: SendTube,
    ) -> std::result::Result<GpuDisplay, GpuDisplayError> {
        match self {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::X(display) => GpuDisplay::open_x(display.as_deref()),
            DisplayBackend::Stub => GpuDisplay::open_stub(),
            #[cfg(windows)]
            DisplayBackend::WinApi => match wndproc_thread.take() {
                Some(wndproc_thread) => GpuDisplay::open_winapi(
                    wndproc_thread,
                    /* win_metrics= */ None,
                    gpu_display_wait_descriptor_ctrl,
                    None,
                ),
                None => {
                    // The thread was already consumed by an earlier build attempt.
                    error!("wndproc_thread is none");
                    Err(GpuDisplayError::Allocate)
                }
            },
            #[cfg(feature = "android_display")]
            DisplayBackend::Android(service_name) => GpuDisplay::open_android(service_name),
        }
    }
}
1377
/// The virtio-gpu device.
///
/// Most processing happens on a dedicated worker thread (see
/// `start_worker_thread`); several fields are `Option`s whose contents are
/// handed off to the worker when it starts.
pub struct Gpu {
    /// Used to request that the VMM exit (e.g. when the display is closed).
    exit_evt_wrtube: SendTube,
    /// Carries `GpuControlCommand`s; moved to the worker thread on start.
    pub gpu_control_tube: Option<Tube>,
    /// Shared memory mapper, shared with the worker thread.
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    /// Resource bridge tubes; moved to the worker thread on start.
    resource_bridges: Option<ResourceBridges>,
    /// Input event devices; consumed by the worker or `initialize_frontend`.
    event_devices: Option<Vec<EventDevice>>,
    /// Signaled to ask the worker to stop processing and sleep.
    worker_suspend_evt: Option<Event>,
    /// Sends `WorkerRequest`s to the worker thread.
    worker_request_sender: Option<mpsc::Sender<WorkerRequest>>,
    /// Receives the worker's responses to `WorkerRequest`s.
    worker_response_receiver: Option<mpsc::Receiver<anyhow::Result<WorkerResponse>>>,
    /// Tracks whether the worker is inactive, active, or errored.
    worker_state: WorkerState,
    worker_thread: Option<WorkerThread<()>>,
    /// Display backends to try, in order, when opening the display.
    display_backends: Vec<DisplayBackend>,
    display_params: Vec<GpuDisplayParameters>,
    /// Set when a display event is pending; surfaced via the config space.
    display_event: Arc<AtomicBool>,
    rutabaga_builder: RutabagaBuilder,
    /// Preferred PCI address, if one was configured.
    pci_address: Option<PciAddress>,
    /// Size of the host-visible shared memory BAR.
    pci_bar_size: u64,
    external_blob: bool,
    fixed_blob_mapping: bool,
    rutabaga_component: RutabagaComponentType,
    #[cfg(windows)]
    wndproc_thread: Option<WindowProcedureThread>,
    /// Transport-level virtio feature bits, OR'd into `features()`.
    base_features: u64,
    udmabuf: bool,
    rutabaga_server_descriptor: Option<SafeDescriptor>,
    #[cfg(windows)]
    /// Because the Windows GpuDisplay can't expose an epollfd, it has to inform the GPU worker
    /// which descriptors to add to its wait context. That's what this Tube is used for (it is
    /// provided to each display backend).
    gpu_display_wait_descriptor_ctrl_wr: SendTube,
    #[cfg(windows)]
    /// The GPU worker uses this Tube to receive the descriptors that should be added to its wait
    /// context.
    gpu_display_wait_descriptor_ctrl_rd: Option<RecvTube>,
    capset_mask: u64,
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Cgroup the worker thread should be moved into, if any.
    gpu_cgroup_path: Option<PathBuf>,
    /// Directory for scratch data used while snapshotting, if any.
    snapshot_scratch_directory: Option<PathBuf>,
}
1417
1418 impl Gpu {
    /// Creates a new virtio-gpu device from the given parameters.
    ///
    /// `channels` maps rutabaga channel names to socket paths: the empty name
    /// selects a Wayland channel, `"mojo"` a camera channel; any other name is
    /// logged as an error and ignored.
    pub fn new(
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: Vec<Tube>,
        display_backends: Vec<DisplayBackend>,
        gpu_parameters: &GpuParameters,
        rutabaga_server_descriptor: Option<SafeDescriptor>,
        event_devices: Vec<EventDevice>,
        base_features: u64,
        channels: &BTreeMap<String, PathBuf>,
        #[cfg(windows)] wndproc_thread: WindowProcedureThread,
        #[cfg(any(target_os = "android", target_os = "linux"))] gpu_cgroup_path: Option<&PathBuf>,
    ) -> Gpu {
        // Ensure there is always at least one (default) display.
        let mut display_params = gpu_parameters.display_params.clone();
        if display_params.is_empty() {
            display_params.push(Default::default());
        }
        // The first display's size seeds rutabaga's default dimensions.
        let (display_width, display_height) = display_params[0].get_virtual_display_size();

        let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
        for (channel_name, path) in channels {
            match &channel_name[..] {
                "" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
                }),
                "mojo" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
                }),
                _ => error!("unknown rutabaga channel"),
            }
        }

        let rutabaga_channels_opt = Some(rutabaga_channels);
        let component = match gpu_parameters.mode {
            GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
            #[cfg(feature = "virgl_renderer")]
            GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
            #[cfg(feature = "gfxstream")]
            GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
        };

        // Only allow virglrenderer to fork its own render server when explicitly requested.
        // Caller can enforce its own restrictions (e.g. not allowed when sandboxed) and set the
        // allow flag appropriately.
        let use_render_server = rutabaga_server_descriptor.is_some()
            || gpu_parameters.allow_implicit_render_server_exec;

        let rutabaga_wsi = match gpu_parameters.wsi {
            Some(GpuWsi::Vulkan) => RutabagaWsi::VulkanSwapchain,
            _ => RutabagaWsi::Surfaceless,
        };

        let rutabaga_builder = RutabagaBuilder::new(component, gpu_parameters.capset_mask)
            .set_display_width(display_width)
            .set_display_height(display_height)
            .set_rutabaga_channels(rutabaga_channels_opt)
            .set_use_egl(gpu_parameters.renderer_use_egl)
            .set_use_gles(gpu_parameters.renderer_use_gles)
            .set_use_glx(gpu_parameters.renderer_use_glx)
            .set_use_surfaceless(gpu_parameters.renderer_use_surfaceless)
            .set_use_vulkan(gpu_parameters.use_vulkan.unwrap_or_default())
            .set_wsi(rutabaga_wsi)
            .set_use_external_blob(gpu_parameters.external_blob)
            .set_use_system_blob(gpu_parameters.system_blob)
            .set_use_render_server(use_render_server)
            .set_renderer_features(gpu_parameters.renderer_features.clone());

        // On Windows the display can't expose an epollfd, so displays send
        // descriptors to the worker over this directional tube pair instead.
        #[cfg(windows)]
        let (gpu_display_wait_descriptor_ctrl_wr, gpu_display_wait_descriptor_ctrl_rd) =
            Tube::directional_pair().expect("failed to create wait descriptor control pair.");

        Gpu {
            exit_evt_wrtube,
            gpu_control_tube: Some(gpu_control_tube),
            mapper: Arc::new(Mutex::new(None)),
            resource_bridges: Some(ResourceBridges::new(resource_bridges)),
            event_devices: Some(event_devices),
            worker_request_sender: None,
            worker_response_receiver: None,
            worker_suspend_evt: None,
            worker_state: WorkerState::Inactive,
            worker_thread: None,
            display_backends,
            display_params,
            display_event: Arc::new(AtomicBool::new(false)),
            rutabaga_builder,
            pci_address: gpu_parameters.pci_address,
            pci_bar_size: gpu_parameters.pci_bar_size,
            external_blob: gpu_parameters.external_blob,
            fixed_blob_mapping: gpu_parameters.fixed_blob_mapping,
            rutabaga_component: component,
            #[cfg(windows)]
            wndproc_thread: Some(wndproc_thread),
            base_features,
            udmabuf: gpu_parameters.udmabuf,
            rutabaga_server_descriptor,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd: Some(gpu_display_wait_descriptor_ctrl_rd),
            capset_mask: gpu_parameters.capset_mask,
            #[cfg(any(target_os = "android", target_os = "linux"))]
            gpu_cgroup_path: gpu_cgroup_path.cloned(),
            snapshot_scratch_directory: gpu_parameters.snapshot_scratch_path.clone(),
        }
    }
1527
    /// Initializes the internal device state so that it can begin processing virtqueues.
    ///
    /// Only used by vhost-user GPU.
    ///
    /// Builds rutabaga and the virtio-gpu backend on the calling thread,
    /// imports the pending event devices (consuming `self.event_devices`), and
    /// returns the resulting `Frontend`. Returns `None` if rutabaga or the
    /// backend fails to build.
    pub fn initialize_frontend(
        &mut self,
        fence_state: Arc<Mutex<FenceState>>,
        fence_handler: RutabagaFenceHandler,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    ) -> Option<Frontend> {
        // Clone the server descriptor so `self` retains its copy.
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });
        let rutabaga = self
            .rutabaga_builder
            .clone()
            .build(fence_handler, rutabaga_server_descriptor)
            .map_err(|e| error!("failed to build rutabaga {}", e))
            .ok()?;

        let mut virtio_gpu = build(
            &self.display_backends,
            self.display_params.clone(),
            self.display_event.clone(),
            rutabaga,
            mapper,
            self.external_blob,
            self.fixed_blob_mapping,
            #[cfg(windows)]
            &mut self.wndproc_thread,
            self.udmabuf,
            #[cfg(windows)]
            self.gpu_display_wait_descriptor_ctrl_wr
                .try_clone()
                .expect("failed to clone wait context control channel"),
            self.snapshot_scratch_directory.clone(),
        )?;

        for event_device in self.event_devices.take().expect("missing event_devices") {
            virtio_gpu
                .import_event_device(event_device)
                // We lost the `EventDevice`, so fail hard.
                .expect("failed to import event device");
        }

        Some(Frontend::new(virtio_gpu, fence_state))
    }
1574
    // This is not invoked when running with vhost-user GPU.
    /// Spawns the "v_gpu" worker thread and blocks until it has finished
    /// constructing its `Worker` (rutabaga, display, etc.).
    ///
    /// Consumes (via `take`) the one-shot resources the worker needs —
    /// `gpu_control_tube`, `resource_bridges`, `event_devices`, and on Windows
    /// the wndproc thread and descriptor-control read end — so this must be
    /// called at most once.
    fn start_worker_thread(&mut self) {
        let suspend_evt = Event::new().unwrap();
        let suspend_evt_copy = suspend_evt
            .try_clone()
            .context("error cloning suspend event")
            .unwrap();

        let exit_evt_wrtube = self
            .exit_evt_wrtube
            .try_clone()
            .context("error cloning exit tube")
            .unwrap();

        let gpu_control_tube = self
            .gpu_control_tube
            .take()
            .context("gpu_control_tube is none")
            .unwrap();

        let resource_bridges = self
            .resource_bridges
            .take()
            .context("resource_bridges is none")
            .unwrap();

        let display_backends = self.display_backends.clone();
        let display_params = self.display_params.clone();
        let display_event = self.display_event.clone();
        let event_devices = self.event_devices.take().expect("missing event_devices");
        let external_blob = self.external_blob;
        let fixed_blob_mapping = self.fixed_blob_mapping;
        let udmabuf = self.udmabuf;
        let snapshot_scratch_directory = self.snapshot_scratch_directory.clone();

        #[cfg(windows)]
        let mut wndproc_thread = self.wndproc_thread.take();

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_wr = self
            .gpu_display_wait_descriptor_ctrl_wr
            .try_clone()
            .expect("failed to clone wait context ctrl channel");

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_rd = self
            .gpu_display_wait_descriptor_ctrl_rd
            .take()
            .expect("failed to take gpu_display_wait_descriptor_ctrl_rd");

        #[cfg(any(target_os = "android", target_os = "linux"))]
        let gpu_cgroup_path = self.gpu_cgroup_path.clone();

        let mapper = Arc::clone(&self.mapper);

        let rutabaga_builder = self.rutabaga_builder.clone();
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });

        // Used by the worker to report that construction finished.
        let (init_finished_tx, init_finished_rx) = mpsc::channel();

        let (worker_request_sender, worker_request_receiver) = mpsc::channel();
        let (worker_response_sender, worker_response_receiver) = mpsc::channel();

        let worker_thread = WorkerThread::start("v_gpu", move |kill_evt| {
            // Move this thread into the requested cgroup before doing any work.
            #[cfg(any(target_os = "android", target_os = "linux"))]
            if let Some(cgroup_path) = gpu_cgroup_path {
                move_task_to_cgroup(cgroup_path, base::gettid())
                    .expect("Failed to move v_gpu into requested cgroup");
            }

            let mut worker = Worker::new(
                rutabaga_builder,
                rutabaga_server_descriptor,
                display_backends,
                display_params,
                display_event,
                mapper,
                event_devices,
                external_blob,
                fixed_blob_mapping,
                udmabuf,
                worker_request_receiver,
                worker_response_sender,
                exit_evt_wrtube,
                gpu_control_tube,
                resource_bridges,
                suspend_evt_copy,
                kill_evt,
                #[cfg(windows)]
                wndproc_thread,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_rd,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_wr,
                snapshot_scratch_directory,
            )
            .expect("Failed to create virtio gpu worker thread");

            // Tell the parent thread that the init phase is complete.
            let _ = init_finished_tx.send(());

            worker.run()
        });

        self.worker_request_sender = Some(worker_request_sender);
        self.worker_response_receiver = Some(worker_response_receiver);
        self.worker_suspend_evt = Some(suspend_evt);
        self.worker_state = WorkerState::Inactive;
        self.worker_thread = Some(worker_thread);

        // Block until the worker signals init completion; a closed channel
        // means the worker thread died during construction.
        match init_finished_rx.recv() {
            Ok(()) => {}
            Err(mpsc::RecvError) => panic!("virtio-gpu worker thread init failed"),
        }
    }
1692
stop_worker_thread(&mut self)1693 fn stop_worker_thread(&mut self) {
1694 self.worker_request_sender.take();
1695 self.worker_response_receiver.take();
1696 self.worker_suspend_evt.take();
1697 if let Some(worker_thread) = self.worker_thread.take() {
1698 worker_thread.stop();
1699 }
1700 }
1701
get_config(&self) -> virtio_gpu_config1702 fn get_config(&self) -> virtio_gpu_config {
1703 let mut events_read = 0;
1704
1705 if self.display_event.load(Ordering::Relaxed) {
1706 events_read |= VIRTIO_GPU_EVENT_DISPLAY;
1707 }
1708
1709 let num_capsets = match self.capset_mask {
1710 0 => {
1711 match self.rutabaga_component {
1712 RutabagaComponentType::Rutabaga2D => 0,
1713 _ => {
1714 #[allow(unused_mut)]
1715 let mut num_capsets = 0;
1716
1717 // Three capsets for virgl_renderer
1718 #[cfg(feature = "virgl_renderer")]
1719 {
1720 num_capsets += 3;
1721 }
1722
1723 // One capset for gfxstream
1724 #[cfg(feature = "gfxstream")]
1725 {
1726 num_capsets += 1;
1727 }
1728
1729 num_capsets
1730 }
1731 }
1732 }
1733 _ => self.capset_mask.count_ones(),
1734 };
1735
1736 virtio_gpu_config {
1737 events_read: Le32::from(events_read),
1738 events_clear: Le32::from(0),
1739 num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32),
1740 num_capsets: Le32::from(num_capsets),
1741 }
1742 }
1743
1744 /// Send a request to exit the process to VMM.
send_exit_evt(&self) -> anyhow::Result<()>1745 pub fn send_exit_evt(&self) -> anyhow::Result<()> {
1746 self.exit_evt_wrtube
1747 .send::<VmEventType>(&VmEventType::Exit)
1748 .context("failed to send exit event")
1749 }
1750 }
1751
1752 impl VirtioDevice for Gpu {
    /// Collects the raw descriptors used by this device that must be kept
    /// open after sandboxing.
    fn keep_rds(&self) -> Vec<RawDescriptor> {
        let mut keep_rds = Vec::new();

        // To find the RawDescriptor associated with stdout and stderr on Windows is difficult.
        // Resource bridges are used only for Wayland displays. There is also no meaningful way
        // casting the underlying DMA buffer wrapped in File to a copyable RawDescriptor.
        // TODO(davidriley): Remove once virgl has another path to include
        // debugging logs.
        #[cfg(any(target_os = "android", target_os = "linux"))]
        if cfg!(debug_assertions) {
            keep_rds.push(libc::STDOUT_FILENO);
            keep_rds.push(libc::STDERR_FILENO);
        }

        if let Some(ref mapper) = *self.mapper.lock() {
            if let Some(descriptor) = mapper.as_raw_descriptor() {
                keep_rds.push(descriptor);
            }
        }

        if let Some(ref rutabaga_server_descriptor) = self.rutabaga_server_descriptor {
            keep_rds.push(rutabaga_server_descriptor.as_raw_descriptor());
        }

        keep_rds.push(self.exit_evt_wrtube.as_raw_descriptor());

        // These are `None` if the worker thread has already taken them, in
        // which case they no longer need to be kept here.
        if let Some(gpu_control_tube) = &self.gpu_control_tube {
            keep_rds.push(gpu_control_tube.as_raw_descriptor());
        }

        if let Some(resource_bridges) = &self.resource_bridges {
            resource_bridges.append_raw_descriptors(&mut keep_rds);
        }

        for event_device in self.event_devices.iter().flatten() {
            keep_rds.push(event_device.as_raw_descriptor());
        }

        keep_rds
    }
1793
    /// Identifies this device as a virtio GPU.
    fn device_type(&self) -> DeviceType {
        DeviceType::Gpu
    }
1797
    /// Maximum sizes for this device's virtqueues (control and cursor).
    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }
1801
features(&self) -> u641802 fn features(&self) -> u64 {
1803 let mut virtio_gpu_features = 1 << VIRTIO_GPU_F_EDID;
1804
1805 // If a non-2D component is specified, enable 3D features. It is possible to run display
1806 // contexts without 3D backend (i.e, gfxstream / virglrender), so check for that too.
1807 if self.rutabaga_component != RutabagaComponentType::Rutabaga2D || self.capset_mask != 0 {
1808 virtio_gpu_features |= 1 << VIRTIO_GPU_F_VIRGL
1809 | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1810 | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
1811 | 1 << VIRTIO_GPU_F_CONTEXT_INIT
1812 | 1 << VIRTIO_GPU_F_EDID;
1813
1814 if self.udmabuf {
1815 virtio_gpu_features |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1816 }
1817
1818 // New experimental/unstable feature, not upstreamed.
1819 // Safe to enable because guest must explicitly opt-in.
1820 virtio_gpu_features |= 1 << VIRTIO_GPU_F_FENCE_PASSING;
1821 }
1822
1823 self.base_features | virtio_gpu_features
1824 }
1825
ack_features(&mut self, value: u64)1826 fn ack_features(&mut self, value: u64) {
1827 let _ = value;
1828 }
1829
    /// Handles guest reads of the config space by copying from a freshly
    /// generated `virtio_gpu_config` at the requested offset.
    fn read_config(&self, offset: u64, data: &mut [u8]) {
        copy_config(data, 0, self.get_config().as_bytes(), offset);
    }
1833
    /// Handles guest writes to the config space.
    ///
    /// The write is applied to a scratch copy of the config; the only write
    /// that has an effect is setting the display bit of `events_clear`, which
    /// clears the latched display event.
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let mut cfg = self.get_config();
        copy_config(cfg.as_mut_bytes(), offset, data, 0);
        if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
            self.display_event.store(false, Ordering::Relaxed);
        }
    }
1841
    /// Called once the device process has been sandboxed; starts the GPU
    /// worker thread early (before `activate`).
    fn on_device_sandboxed(&mut self) {
        // Unlike most Virtio devices which start their worker thread in activate(),
        // the Gpu's worker thread is started earlier here so that rutabaga and the
        // underlying render server have a chance to initialize before the guest OS
        // starts. This is needed because the Virtio GPU kernel module has a timeout
        // for some calls during initialization and some host GPU drivers have been
        // observed to be extremely slow to initialize on fresh GCE instances. The
        // entire worker thread is started here (as opposed to just initializing
        // rutabaga and the underlying render server) as OpenGL based renderers may
        // expect to be initialized on the same thread that later processes commands.
        self.start_worker_thread();
    }
1854
    /// Begins virtqueue processing by handing the queues, guest memory, and
    /// interrupt to the already-running worker thread and waiting for its
    /// acknowledgement.
    ///
    /// Queue 0 is the control queue and queue 1 the cursor queue; exactly
    /// `QUEUE_SIZES.len()` queues are required.
    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

        // The control queue is wrapped for sharing (it is also cloned into the
        // fence handler); the cursor queue stays local to the worker.
        let ctrl_queue = SharedQueueReader::new(queues.remove(&0).unwrap());
        let cursor_queue = LocalQueueReader::new(queues.remove(&1).unwrap());

        self.worker_request_sender
            .as_ref()
            .context("worker thread missing on activate?")?
            .send(WorkerRequest::Activate(WorkerActivateRequest {
                resources: GpuActivationResources {
                    mem,
                    interrupt,
                    ctrl_queue,
                    cursor_queue,
                },
            }))
            .map_err(|e| anyhow!("failed to send virtio gpu worker activate request: {:?}", e))?;

        // Record the outcome so later requests can tell whether the worker is
        // usable.
        self.worker_response_receiver
            .as_ref()
            .context("worker thread missing on activate?")?
            .recv()
            .inspect(|_| self.worker_state = WorkerState::Active)
            .inspect_err(|_| self.worker_state = WorkerState::Error)
            .context("failed to receive response for virtio gpu worker resume request")??;

        Ok(())
    }
1895
    // Returns the device's preferred PCI address, if one has been set.
    fn pci_address(&self) -> Option<PciAddress> {
        self.pci_address
    }
1899
    // Advertises the host-visible shared memory region (the PCI BAR used for
    // blob resource mappings), sized by `pci_bar_size`.
    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
            length: self.pci_bar_size,
        })
    }
1906
    // Stores the mapper used to map blob resources into the shared memory
    // region, replacing any previously installed mapper.
    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper.lock().replace(mapper);
    }
1910
    // Whether shared-memory descriptors can be exposed through the virtio
    // IOMMU; currently incompatible with fixed blob mapping.
    fn expose_shmem_descriptors_with_viommu(&self) -> bool {
        // TODO(b/323368701): integrate with fixed_blob_mapping so this can always return true.
        !self.fixed_blob_mapping
    }
1915
get_shared_memory_prepare_type(&mut self) -> SharedMemoryPrepareType1916 fn get_shared_memory_prepare_type(&mut self) -> SharedMemoryPrepareType {
1917 if self.fixed_blob_mapping {
1918 let cache_type = if cfg!(feature = "noncoherent-dma") {
1919 MemCacheType::CacheNonCoherent
1920 } else {
1921 MemCacheType::CacheCoherent
1922 };
1923 SharedMemoryPrepareType::SingleMappingOnFirst(cache_type)
1924 } else {
1925 SharedMemoryPrepareType::DynamicPerMapping
1926 }
1927 }
1928
1929 // Notes on sleep/wake/snapshot/restore functionality.
1930 //
1931 // * Only 2d mode is supported so far.
1932 // * We only snapshot the state relevant to the virtio-gpu 2d mode protocol (i.e. scanouts,
1933 // resources, fences).
1934 // * The GpuDisplay is recreated from scratch, we don't want to snapshot the state of a
1935 // Wayland socket (for example).
1936 // * No state about pending virtio requests needs to be snapshotted because the 2d backend
1937 // completes them synchronously.
virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>>1938 fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
1939 match self.worker_state {
1940 WorkerState::Error => {
1941 return Err(anyhow!(
1942 "failed to sleep virtio gpu worker which is in error state"
1943 ));
1944 }
1945 WorkerState::Inactive => {
1946 return Ok(None);
1947 }
1948 _ => (),
1949 };
1950
1951 if let (
1952 Some(worker_request_sender),
1953 Some(worker_response_receiver),
1954 Some(worker_suspend_evt),
1955 ) = (
1956 &self.worker_request_sender,
1957 &self.worker_response_receiver,
1958 &self.worker_suspend_evt,
1959 ) {
1960 worker_request_sender
1961 .send(WorkerRequest::Suspend)
1962 .map_err(|e| {
1963 anyhow!(
1964 "failed to send suspend request to virtio gpu worker: {:?}",
1965 e
1966 )
1967 })?;
1968
1969 worker_suspend_evt
1970 .signal()
1971 .context("failed to signal virtio gpu worker suspend event")?;
1972
1973 let response = worker_response_receiver
1974 .recv()
1975 .inspect(|_| self.worker_state = WorkerState::Inactive)
1976 .inspect_err(|_| self.worker_state = WorkerState::Error)
1977 .context("failed to receive response for virtio gpu worker suspend request")??;
1978
1979 worker_suspend_evt
1980 .reset()
1981 .context("failed to reset virtio gpu worker suspend event")?;
1982
1983 match response {
1984 WorkerResponse::Suspend(deactivation_resources) => Ok(deactivation_resources
1985 .queues
1986 .map(|q| q.into_iter().enumerate().collect())),
1987 _ => {
1988 panic!("unexpected response from virtio gpu worker sleep request");
1989 }
1990 }
1991 } else {
1992 Err(anyhow!("virtio gpu worker not available for sleep"))
1993 }
1994 }
1995
virtio_wake( &mut self, queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>, ) -> anyhow::Result<()>1996 fn virtio_wake(
1997 &mut self,
1998 queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
1999 ) -> anyhow::Result<()> {
2000 match self.worker_state {
2001 WorkerState::Error => {
2002 return Err(anyhow!(
2003 "failed to wake virtio gpu worker which is in error state"
2004 ));
2005 }
2006 WorkerState::Active => {
2007 return Ok(());
2008 }
2009 _ => (),
2010 };
2011
2012 match queues_state {
2013 None => Ok(()),
2014 Some((mem, interrupt, queues)) => {
2015 // TODO(khei): activate is just what we want at the moment, but we should probably
2016 // move it into a "start workers" function to make it obvious that it isn't
2017 // strictly used for activate events.
2018 self.activate(mem, interrupt, queues)?;
2019 Ok(())
2020 }
2021 }
2022 }
2023
virtio_snapshot(&mut self) -> anyhow::Result<AnySnapshot>2024 fn virtio_snapshot(&mut self) -> anyhow::Result<AnySnapshot> {
2025 match self.worker_state {
2026 WorkerState::Error => {
2027 return Err(anyhow!(
2028 "failed to snapshot virtio gpu worker which is in error state"
2029 ));
2030 }
2031 WorkerState::Active => {
2032 return Err(anyhow!(
2033 "failed to snapshot virtio gpu worker which is in active state"
2034 ));
2035 }
2036 _ => (),
2037 };
2038
2039 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2040 (&self.worker_request_sender, &self.worker_response_receiver)
2041 {
2042 worker_request_sender
2043 .send(WorkerRequest::Snapshot)
2044 .map_err(|e| {
2045 anyhow!(
2046 "failed to send snapshot request to virtio gpu worker: {:?}",
2047 e
2048 )
2049 })?;
2050
2051 match worker_response_receiver
2052 .recv()
2053 .inspect_err(|_| self.worker_state = WorkerState::Error)
2054 .context("failed to receive response for virtio gpu worker suspend request")??
2055 {
2056 WorkerResponse::Snapshot(snapshot) => Ok(AnySnapshot::to_any(snapshot)?),
2057 _ => {
2058 panic!("unexpected response from virtio gpu worker sleep request");
2059 }
2060 }
2061 } else {
2062 Err(anyhow!("virtio gpu worker not available for snapshot"))
2063 }
2064 }
2065
virtio_restore(&mut self, data: AnySnapshot) -> anyhow::Result<()>2066 fn virtio_restore(&mut self, data: AnySnapshot) -> anyhow::Result<()> {
2067 match self.worker_state {
2068 WorkerState::Error => {
2069 return Err(anyhow!(
2070 "failed to restore virtio gpu worker which is in error state"
2071 ));
2072 }
2073 WorkerState::Active => {
2074 return Err(anyhow!(
2075 "failed to restore virtio gpu worker which is in active state"
2076 ));
2077 }
2078 _ => (),
2079 };
2080
2081 let snapshot: WorkerSnapshot = AnySnapshot::from_any(data)?;
2082
2083 if let (Some(worker_request_sender), Some(worker_response_receiver)) =
2084 (&self.worker_request_sender, &self.worker_response_receiver)
2085 {
2086 worker_request_sender
2087 .send(WorkerRequest::Restore(snapshot))
2088 .map_err(|e| {
2089 anyhow!(
2090 "failed to send suspend request to virtio gpu worker: {:?}",
2091 e
2092 )
2093 })?;
2094
2095 let response = worker_response_receiver
2096 .recv()
2097 .inspect_err(|_| self.worker_state = WorkerState::Error)
2098 .context("failed to receive response for virtio gpu worker suspend request")??;
2099
2100 match response {
2101 WorkerResponse::Ok => Ok(()),
2102 _ => {
2103 panic!("unexpected response from virtio gpu worker sleep request");
2104 }
2105 }
2106 } else {
2107 Err(anyhow!("virtio gpu worker not available for restore"))
2108 }
2109 }
2110
    // Resets the device by stopping the worker thread; a new worker is started
    // again later (e.g. via on_device_sandboxed/activate).
    fn reset(&mut self) -> anyhow::Result<()> {
        self.stop_worker_thread();
        Ok(())
    }
2115 }
2116
impl Drop for Gpu {
    fn drop(&mut self) {
        // Best-effort worker shutdown on drop; any error is ignored.
        let _ = self.reset();
    }
}
2122
2123 /// This struct takes the ownership of resource bridges and tracks which ones should be processed.
/// This struct takes the ownership of resource bridges and tracks which ones should be processed.
struct ResourceBridges {
    // The owned resource bridge tubes.
    resource_bridges: Vec<Tube>,
    // Parallel to `resource_bridges`: true when the bridge at the same index
    // is marked for processing by the next `process_resource_bridges()` call.
    should_process: Vec<bool>,
}
2128
2129 impl ResourceBridges {
new(resource_bridges: Vec<Tube>) -> Self2130 pub fn new(resource_bridges: Vec<Tube>) -> Self {
2131 #[cfg(windows)]
2132 assert!(
2133 resource_bridges.is_empty(),
2134 "resource bridges are not supported on Windows"
2135 );
2136
2137 let mut resource_bridges = Self {
2138 resource_bridges,
2139 should_process: Default::default(),
2140 };
2141 resource_bridges.reset_should_process();
2142 resource_bridges
2143 }
2144
2145 // Appends raw descriptors of all resource bridges to the given vector.
append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>)2146 pub fn append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>) {
2147 for bridge in &self.resource_bridges {
2148 rds.push(bridge.as_raw_descriptor());
2149 }
2150 }
2151
2152 /// Adds all resource bridges to WaitContext.
add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>)2153 pub fn add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>) {
2154 for (index, bridge) in self.resource_bridges.iter().enumerate() {
2155 if let Err(e) = wait_ctx.add(bridge, WorkerToken::ResourceBridge { index }) {
2156 error!("failed to add resource bridge to WaitContext: {}", e);
2157 }
2158 }
2159 }
2160
2161 /// Marks that the resource bridge at the given index should be processed when
2162 /// `process_resource_bridges()` is called.
set_should_process(&mut self, index: usize)2163 pub fn set_should_process(&mut self, index: usize) {
2164 self.should_process[index] = true;
2165 }
2166
2167 /// Processes all resource bridges that have been marked as should be processed. The markings
2168 /// will be cleared before returning. Faulty resource bridges will be removed from WaitContext.
process_resource_bridges( &mut self, state: &mut Frontend, wait_ctx: &mut WaitContext<WorkerToken>, )2169 pub fn process_resource_bridges(
2170 &mut self,
2171 state: &mut Frontend,
2172 wait_ctx: &mut WaitContext<WorkerToken>,
2173 ) {
2174 for (bridge, &should_process) in self.resource_bridges.iter().zip(&self.should_process) {
2175 if should_process {
2176 if let Err(e) = state.process_resource_bridge(bridge) {
2177 error!("Failed to process resource bridge: {:#}", e);
2178 error!("Removing that resource bridge from the wait context.");
2179 wait_ctx.delete(bridge).unwrap_or_else(|e| {
2180 error!("Failed to remove faulty resource bridge: {:#}", e)
2181 });
2182 }
2183 }
2184 }
2185 self.reset_should_process();
2186 }
2187
reset_should_process(&mut self)2188 fn reset_should_process(&mut self) {
2189 self.should_process.clear();
2190 self.should_process
2191 .resize(self.resource_bridges.len(), false);
2192 }
2193 }
2194