1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 mod protocol;
6 mod udmabuf;
7 mod udmabuf_bindings;
8 mod virtio_gpu;
9 
10 use std::cell::RefCell;
11 use std::collections::{BTreeMap, VecDeque};
12 use std::convert::TryFrom;
13 use std::io::Read;
14 use std::mem::{self, size_of};
15 use std::path::PathBuf;
16 use std::rc::Rc;
17 use std::sync::Arc;
18 use std::thread;
19 
20 use anyhow::Context;
21 
22 use base::{
23     debug, error, warn, AsRawDescriptor, Event, ExternalMapping, PollToken, RawDescriptor,
24     SafeDescriptor, Tube, WaitContext,
25 };
26 
27 use data_model::*;
28 
29 pub use gpu_display::EventDevice;
30 use gpu_display::*;
31 use rutabaga_gfx::*;
32 
33 use resources::Alloc;
34 
35 use serde::{Deserialize, Serialize};
36 use sync::Mutex;
37 use vm_memory::{GuestAddress, GuestMemory};
38 
39 use super::{
40     copy_config, resource_bridge::*, DescriptorChain, Interrupt, Queue, Reader,
41     SignalableInterrupt, VirtioDevice, Writer, TYPE_GPU,
42 };
43 
44 use super::{PciCapabilityType, VirtioPciShmCap};
45 
46 use self::protocol::*;
47 pub use self::protocol::{
48     virtio_gpu_config, VIRTIO_GPU_F_CONTEXT_INIT, VIRTIO_GPU_F_CREATE_GUEST_HANDLE,
49     VIRTIO_GPU_F_EDID, VIRTIO_GPU_F_RESOURCE_BLOB, VIRTIO_GPU_F_RESOURCE_SYNC,
50     VIRTIO_GPU_F_RESOURCE_UUID, VIRTIO_GPU_F_VIRGL, VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
51 };
52 use self::virtio_gpu::VirtioGpu;
53 
54 use crate::pci::{
55     PciAddress, PciBarConfiguration, PciBarPrefetchable, PciBarRegionType, PciCapability,
56 };
57 
58 pub const DEFAULT_DISPLAY_WIDTH: u32 = 1280;
59 pub const DEFAULT_DISPLAY_HEIGHT: u32 = 1024;
60 
61 #[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
62 pub enum GpuMode {
63     Mode2D,
64     ModeVirglRenderer,
65     ModeGfxstream,
66 }
67 
68 #[derive(Copy, Clone, Debug, Serialize, Deserialize)]
69 pub struct GpuDisplayParameters {
70     pub width: u32,
71     pub height: u32,
72 }
73 
74 impl Default for GpuDisplayParameters {
75     fn default() -> Self {
76         GpuDisplayParameters {
77             width: DEFAULT_DISPLAY_WIDTH,
78             height: DEFAULT_DISPLAY_HEIGHT,
79         }
80     }
81 }
82 
83 #[derive(Debug, Serialize, Deserialize)]
84 #[serde(default)]
85 pub struct GpuParameters {
86     pub displays: Vec<GpuDisplayParameters>,
87     pub renderer_use_egl: bool,
88     pub renderer_use_gles: bool,
89     pub renderer_use_glx: bool,
90     pub renderer_use_surfaceless: bool,
91     pub gfxstream_use_guest_angle: bool,
92     pub gfxstream_use_syncfd: bool,
93     pub use_vulkan: bool,
94     pub udmabuf: bool,
95     pub mode: GpuMode,
96     pub cache_path: Option<String>,
97     pub cache_size: Option<String>,
98 }
99 
100 // The first queue is for virtio gpu commands. The second queue is for cursor commands, which we
101 // expect to be less frequent.
102 pub const QUEUE_SIZES: &[u16] = &[256, 16];
103 
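// The device exposes a single 256 MiB (1 << 28) PCI memory BAR (BAR 4). get_device_caps()
// below advertises it as the VIRTIO_GPU_SHM_ID_HOST_VISIBLE shared memory region, which is
// where blob resources are mapped for the guest (see GpuCommand::ResourceMapBlob).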
104 pub const GPU_BAR_NUM: u8 = 4;
105 pub const GPU_BAR_OFFSET: u64 = 0;
106 pub const GPU_BAR_SIZE: u64 = 1 << 28;
107 
108 impl Default for GpuParameters {
109     fn default() -> Self {
110         GpuParameters {
111             displays: vec![],
112             renderer_use_egl: true,
113             renderer_use_gles: true,
114             renderer_use_glx: false,
115             renderer_use_surfaceless: true,
116             gfxstream_use_guest_angle: false,
117             gfxstream_use_syncfd: true,
118             use_vulkan: false,
119             mode: if cfg!(feature = "virgl_renderer") {
120                 GpuMode::ModeVirglRenderer
121             } else {
122                 GpuMode::Mode2D
123             },
124             cache_path: None,
125             cache_size: None,
126             udmabuf: false,
127         }
128     }
129 }
130 
131 #[derive(Copy, Clone, Debug)]
132 pub struct VirtioScanoutBlobData {
133     pub width: u32,
134     pub height: u32,
135     pub drm_format: DrmFormat,
136     pub strides: [u32; 4],
137     pub offsets: [u32; 4],
138 }
139 
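// A fence lives either on the global timeline (classic virtio-gpu fencing) or, when
// VIRTIO_GPU_FLAG_INFO_RING_IDX is set in the command header, on a per-context ring.
// Completion is tracked independently for each ring.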
140 #[derive(PartialEq, Eq, PartialOrd, Ord)]
141 enum VirtioGpuRing {
142     Global,
143     ContextSpecific { ctx_id: u32, ring_idx: u8 },
144 }
145 
146 struct FenceDescriptor {
147     ring: VirtioGpuRing,
148     fence_id: u64,
149     index: u16,
150     len: u32,
151 }
152 
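// Fence bookkeeping shared between the worker thread and the fence handler: `descs` holds
// descriptors whose responses are deferred until their fence signals, and `completed_fences`
// records the highest fence id already completed on each ring so that fences created after
// completion can be answered immediately.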
153 #[derive(Default)]
154 pub struct FenceState {
155     descs: Vec<FenceDescriptor>,
156     completed_fences: BTreeMap<VirtioGpuRing, u64>,
157 }
158 
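// Two QueueReader implementations follow: the control queue is shared with the fence handler
// closure (which rutabaga may invoke from another thread), so it uses an Arc<Mutex<Queue>>;
// the cursor queue is only touched by the worker thread and gets by with a RefCell.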
159 pub trait QueueReader {
160     fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain>;
161     fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32);
162     fn signal_used(&self, mem: &GuestMemory);
163 }
164 
165 struct LocalQueueReader {
166     queue: RefCell<Queue>,
167     interrupt: Arc<Interrupt>,
168 }
169 
170 impl LocalQueueReader {
171     fn new(queue: Queue, interrupt: &Arc<Interrupt>) -> Self {
172         Self {
173             queue: RefCell::new(queue),
174             interrupt: interrupt.clone(),
175         }
176     }
177 }
178 
179 impl QueueReader for LocalQueueReader {
180     fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain> {
181         self.queue.borrow_mut().pop(mem)
182     }
183 
184     fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32) {
185         self.queue.borrow_mut().add_used(mem, desc_index, len)
186     }
187 
188     fn signal_used(&self, mem: &GuestMemory) {
189         self.queue
190             .borrow_mut()
191             .trigger_interrupt(mem, &*self.interrupt);
192     }
193 }
194 
195 #[derive(Clone)]
196 struct SharedQueueReader {
197     queue: Arc<Mutex<Queue>>,
198     interrupt: Arc<Interrupt>,
199 }
200 
201 impl SharedQueueReader {
202     fn new(queue: Queue, interrupt: &Arc<Interrupt>) -> Self {
203         Self {
204             queue: Arc::new(Mutex::new(queue)),
205             interrupt: interrupt.clone(),
206         }
207     }
208 }
209 
210 impl QueueReader for SharedQueueReader {
211     fn pop(&self, mem: &GuestMemory) -> Option<DescriptorChain> {
212         self.queue.lock().pop(mem)
213     }
214 
215     fn add_used(&self, mem: &GuestMemory, desc_index: u16, len: u32) {
216         self.queue.lock().add_used(mem, desc_index, len)
217     }
218 
219     fn signal_used(&self, mem: &GuestMemory) {
220         self.queue.lock().trigger_interrupt(mem, &*self.interrupt);
221     }
222 }
223 
224 /// Initializes the virtio_gpu state tracker.
225 fn build(
226     display_backends: &[DisplayBackend],
227     display_params: Vec<GpuDisplayParameters>,
228     rutabaga_builder: RutabagaBuilder,
229     event_devices: Vec<EventDevice>,
230     gpu_device_tube: Tube,
231     pci_bar: Alloc,
232     map_request: Arc<Mutex<Option<ExternalMapping>>>,
233     external_blob: bool,
234     udmabuf: bool,
235     fence_handler: RutabagaFenceHandler,
236     render_server_fd: Option<SafeDescriptor>,
237 ) -> Option<VirtioGpu> {
238     let mut display_opt = None;
239     for display_backend in display_backends {
240         match display_backend.build() {
241             Ok(c) => {
242                 display_opt = Some(c);
243                 break;
244             }
245             Err(e) => error!("failed to open display: {}", e),
246         };
247     }
248 
249     let display = match display_opt {
250         Some(d) => d,
251         None => {
252             error!("failed to open any displays");
253             return None;
254         }
255     };
256 
257     VirtioGpu::new(
258         display,
259         display_params,
260         rutabaga_builder,
261         event_devices,
262         gpu_device_tube,
263         pci_bar,
264         map_request,
265         external_blob,
266         udmabuf,
267         fence_handler,
268         render_server_fd,
269     )
270 }
271 
272 /// Creates a handler that completes fenced descriptors on the control queue as fences signal.
273 pub fn create_fence_handler<Q>(
274     mem: GuestMemory,
275     ctrl_queue: Q,
276     fence_state: Arc<Mutex<FenceState>>,
277 ) -> RutabagaFenceHandler
278 where
279     Q: QueueReader + Send + Clone + 'static,
280 {
281     RutabagaFenceClosure::new(move |completed_fence| {
282         let mut signal = false;
283 
284         {
285             let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
286                 0 => VirtioGpuRing::Global,
287                 _ => VirtioGpuRing::ContextSpecific {
288                     ctx_id: completed_fence.ctx_id,
289                     ring_idx: completed_fence.ring_idx,
290                 },
291             };
292 
293             let mut fence_state = fence_state.lock();
294             fence_state.descs.retain(|f_desc| {
295                 if f_desc.ring == ring && f_desc.fence_id <= completed_fence.fence_id {
296                     ctrl_queue.add_used(&mem, f_desc.index, f_desc.len);
297                     signal = true;
298                     return false;
299                 }
300                 true
301             });
302             // Update the last completed fence for this context
303             fence_state
304                 .completed_fences
305                 .insert(ring, completed_fence.fence_id);
306         }
307 
308         if signal {
309             ctrl_queue.signal_used(&mem);
310         }
311     })
312 }
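// This handler is the counterpart of Frontend::process_descriptor(): when a fenced command
// cannot complete immediately, process_descriptor() queues a FenceDescriptor instead of
// returning the descriptor, and the closure above hands it back to the guest once rutabaga
// reports the fence as signaled.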
313 
314 pub struct ReturnDescriptor {
315     pub index: u16,
316     pub len: u32,
317 }
318 
319 pub struct Frontend {
320     fence_state: Arc<Mutex<FenceState>>,
321     return_cursor_descriptors: VecDeque<ReturnDescriptor>,
322     virtio_gpu: VirtioGpu,
323 }
324 
325 impl Frontend {
326     fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
327         Frontend {
328             fence_state,
329             return_cursor_descriptors: Default::default(),
330             virtio_gpu,
331         }
332     }
333 
334     /// Returns the internal connection to the compositor and its associated state.
335     pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
336         self.virtio_gpu.display()
337     }
338 
339     /// Processes the internal `display` events and returns `true` if any display was closed.
340     pub fn process_display(&mut self) -> bool {
341         self.virtio_gpu.process_display()
342     }
343 
344     /// Processes incoming requests on `resource_bridge`.
345     pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
346         let response = match resource_bridge.recv() {
347             Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
348             Ok(ResourceRequest::GetFence { seqno }) => {
349                 // The seqno originated from self.backend, so
350                 // it should fit in a u32.
351                 match u32::try_from(seqno) {
352                     Ok(fence_id) => self.virtio_gpu.export_fence(fence_id),
353                     Err(_) => ResourceResponse::Invalid,
354                 }
355             }
356             Err(e) => return Err(e).context("Error receiving resource bridge request"),
357         };
358 
359         resource_bridge
360             .send(&response)
361             .context("Error sending resource bridge response")?;
362 
363         Ok(())
364     }
365 
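    // Dispatches a single decoded GpuCommand to the VirtioGpu backend. The resulting
    // VirtioGpuResult is encoded into a response (and, for fenced commands, possibly
    // deferred) by process_descriptor().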
366     fn process_gpu_command(
367         &mut self,
368         mem: &GuestMemory,
369         cmd: GpuCommand,
370         reader: &mut Reader,
371     ) -> VirtioGpuResult {
372         self.virtio_gpu.force_ctx_0();
373 
374         match cmd {
375             GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
376                 self.virtio_gpu.display_info().to_vec(),
377             )),
378             GpuCommand::ResourceCreate2d(info) => {
379                 let resource_id = info.resource_id.to_native();
380 
381                 let resource_create_3d = ResourceCreate3D {
382                     target: RUTABAGA_PIPE_TEXTURE_2D,
383                     format: info.format.to_native(),
384                     bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
385                     width: info.width.to_native(),
386                     height: info.height.to_native(),
387                     depth: 1,
388                     array_size: 1,
389                     last_level: 0,
390                     nr_samples: 0,
391                     flags: 0,
392                 };
393 
394                 self.virtio_gpu
395                     .resource_create_3d(resource_id, resource_create_3d)
396             }
397             GpuCommand::ResourceUnref(info) => {
398                 self.virtio_gpu.unref_resource(info.resource_id.to_native())
399             }
400             GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
401                 info.scanout_id.to_native(),
402                 info.resource_id.to_native(),
403                 None,
404             ),
405             GpuCommand::ResourceFlush(info) => {
406                 self.virtio_gpu.flush_resource(info.resource_id.to_native())
407             }
408             GpuCommand::TransferToHost2d(info) => {
409                 let resource_id = info.resource_id.to_native();
410                 let transfer = Transfer3D::new_2d(
411                     info.r.x.to_native(),
412                     info.r.y.to_native(),
413                     info.r.width.to_native(),
414                     info.r.height.to_native(),
415                 );
416                 self.virtio_gpu.transfer_write(0, resource_id, transfer)
417             }
418             GpuCommand::ResourceAttachBacking(info) => {
419                 let available_bytes = reader.available_bytes();
420                 if available_bytes != 0 {
421                     let entry_count = info.nr_entries.to_native() as usize;
422                     let mut vecs = Vec::with_capacity(entry_count);
423                     for _ in 0..entry_count {
424                         match reader.read_obj::<virtio_gpu_mem_entry>() {
425                             Ok(entry) => {
426                                 let addr = GuestAddress(entry.addr.to_native());
427                                 let len = entry.length.to_native() as usize;
428                                 vecs.push((addr, len))
429                             }
430                             Err(_) => return Err(GpuResponse::ErrUnspec),
431                         }
432                     }
433                     self.virtio_gpu
434                         .attach_backing(info.resource_id.to_native(), mem, vecs)
435                 } else {
436                     error!("missing data for command {:?}", cmd);
437                     Err(GpuResponse::ErrUnspec)
438                 }
439             }
440             GpuCommand::ResourceDetachBacking(info) => {
441                 self.virtio_gpu.detach_backing(info.resource_id.to_native())
442             }
443             GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
444                 info.resource_id.to_native(),
445                 info.pos.scanout_id.to_native(),
446                 info.pos.x.into(),
447                 info.pos.y.into(),
448             ),
449             GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
450                 info.pos.scanout_id.to_native(),
451                 info.pos.x.into(),
452                 info.pos.y.into(),
453             ),
454             GpuCommand::ResourceAssignUuid(info) => {
455                 let resource_id = info.resource_id.to_native();
456                 self.virtio_gpu.resource_assign_uuid(resource_id)
457             }
458             GpuCommand::GetCapsetInfo(info) => self
459                 .virtio_gpu
460                 .get_capset_info(info.capset_index.to_native()),
461             GpuCommand::GetCapset(info) => self
462                 .virtio_gpu
463                 .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
464             GpuCommand::CtxCreate(info) => self
465                 .virtio_gpu
466                 .create_context(info.hdr.ctx_id.to_native(), info.context_init.to_native()),
467             GpuCommand::CtxDestroy(info) => {
468                 self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
469             }
470             GpuCommand::CtxAttachResource(info) => self
471                 .virtio_gpu
472                 .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
473             GpuCommand::CtxDetachResource(info) => self
474                 .virtio_gpu
475                 .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
476             GpuCommand::ResourceCreate3d(info) => {
477                 let resource_id = info.resource_id.to_native();
478                 let resource_create_3d = ResourceCreate3D {
479                     target: info.target.to_native(),
480                     format: info.format.to_native(),
481                     bind: info.bind.to_native(),
482                     width: info.width.to_native(),
483                     height: info.height.to_native(),
484                     depth: info.depth.to_native(),
485                     array_size: info.array_size.to_native(),
486                     last_level: info.last_level.to_native(),
487                     nr_samples: info.nr_samples.to_native(),
488                     flags: info.flags.to_native(),
489                 };
490 
491                 self.virtio_gpu
492                     .resource_create_3d(resource_id, resource_create_3d)
493             }
494             GpuCommand::TransferToHost3d(info) => {
495                 let ctx_id = info.hdr.ctx_id.to_native();
496                 let resource_id = info.resource_id.to_native();
497 
498                 let transfer = Transfer3D {
499                     x: info.box_.x.to_native(),
500                     y: info.box_.y.to_native(),
501                     z: info.box_.z.to_native(),
502                     w: info.box_.w.to_native(),
503                     h: info.box_.h.to_native(),
504                     d: info.box_.d.to_native(),
505                     level: info.level.to_native(),
506                     stride: info.stride.to_native(),
507                     layer_stride: info.layer_stride.to_native(),
508                     offset: info.offset.to_native(),
509                 };
510 
511                 self.virtio_gpu
512                     .transfer_write(ctx_id, resource_id, transfer)
513             }
514             GpuCommand::TransferFromHost3d(info) => {
515                 let ctx_id = info.hdr.ctx_id.to_native();
516                 let resource_id = info.resource_id.to_native();
517 
518                 let transfer = Transfer3D {
519                     x: info.box_.x.to_native(),
520                     y: info.box_.y.to_native(),
521                     z: info.box_.z.to_native(),
522                     w: info.box_.w.to_native(),
523                     h: info.box_.h.to_native(),
524                     d: info.box_.d.to_native(),
525                     level: info.level.to_native(),
526                     stride: info.stride.to_native(),
527                     layer_stride: info.layer_stride.to_native(),
528                     offset: info.offset.to_native(),
529                 };
530 
531                 self.virtio_gpu
532                     .transfer_read(ctx_id, resource_id, transfer, None)
533             }
534             GpuCommand::CmdSubmit3d(info) => {
535                 if reader.available_bytes() != 0 {
536                     let cmd_size = info.size.to_native() as usize;
537                     let mut cmd_buf = vec![0; cmd_size];
538                     if reader.read_exact(&mut cmd_buf[..]).is_ok() {
539                         self.virtio_gpu
540                             .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
541                     } else {
542                         Err(GpuResponse::ErrInvalidParameter)
543                     }
544                 } else {
545                     // Silently accept empty command buffers to allow for
546                     // benchmarking.
547                     Ok(GpuResponse::OkNoData)
548                 }
549             }
550             GpuCommand::ResourceCreateBlob(info) => {
551                 let resource_id = info.resource_id.to_native();
552                 let ctx_id = info.hdr.ctx_id.to_native();
553 
554                 let resource_create_blob = ResourceCreateBlob {
555                     blob_mem: info.blob_mem.to_native(),
556                     blob_flags: info.blob_flags.to_native(),
557                     blob_id: info.blob_id.to_native(),
558                     size: info.size.to_native(),
559                 };
560 
561                 let entry_count = info.nr_entries.to_native();
562                 if reader.available_bytes() == 0 && entry_count > 0 {
563                     return Err(GpuResponse::ErrUnspec);
564                 }
565 
566                 let mut vecs = Vec::with_capacity(entry_count as usize);
567                 for _ in 0..entry_count {
568                     match reader.read_obj::<virtio_gpu_mem_entry>() {
569                         Ok(entry) => {
570                             let addr = GuestAddress(entry.addr.to_native());
571                             let len = entry.length.to_native() as usize;
572                             vecs.push((addr, len))
573                         }
574                         Err(_) => return Err(GpuResponse::ErrUnspec),
575                     }
576                 }
577 
578                 self.virtio_gpu.resource_create_blob(
579                     ctx_id,
580                     resource_id,
581                     resource_create_blob,
582                     vecs,
583                     mem,
584                 )
585             }
586             GpuCommand::SetScanoutBlob(info) => {
587                 let scanout_id = info.scanout_id.to_native();
588                 let resource_id = info.resource_id.to_native();
589                 let virtio_gpu_format = info.format.to_native();
590                 let width = info.width.to_native();
591                 let height = info.height.to_native();
592                 let mut strides: [u32; 4] = [0; 4];
593                 let mut offsets: [u32; 4] = [0; 4];
594 
595                 // As of v4.19, virtio-gpu kms only really uses these formats.  If that changes,
596                 // the following may have to change too.
597                 let drm_format = match virtio_gpu_format {
598                     VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
599                     VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
600                     _ => {
601                         error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
602                         return Err(GpuResponse::ErrUnspec);
603                     }
604                 };
605 
606                 for plane_index in 0..PLANE_INFO_MAX_COUNT {
607                     offsets[plane_index] = info.offsets[plane_index].to_native();
608                     strides[plane_index] = info.strides[plane_index].to_native();
609                 }
610 
611                 let scanout = VirtioScanoutBlobData {
612                     width,
613                     height,
614                     drm_format,
615                     strides,
616                     offsets,
617                 };
618 
619                 self.virtio_gpu
620                     .set_scanout(scanout_id, resource_id, Some(scanout))
621             }
622             GpuCommand::ResourceMapBlob(info) => {
623                 let resource_id = info.resource_id.to_native();
624                 let offset = info.offset.to_native();
625                 self.virtio_gpu.resource_map_blob(resource_id, offset)
626             }
627             GpuCommand::ResourceUnmapBlob(info) => {
628                 let resource_id = info.resource_id.to_native();
629                 self.virtio_gpu.resource_unmap_blob(resource_id)
630             }
631         }
632     }
633 
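    // A request descriptor must be device-readable and large enough to hold at least a
    // virtio_gpu_ctrl_hdr; anything else is returned to the guest as a zero-length used
    // entry.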
634     fn validate_desc(desc: &DescriptorChain) -> bool {
635         desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only()
636     }
637 
638     /// Processes virtio messages on `queue`.
639     pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
640         let mut signal_used = false;
641         while let Some(desc) = queue.pop(mem) {
642             if Frontend::validate_desc(&desc) {
643                 match (
644                     Reader::new(mem.clone(), desc.clone()),
645                     Writer::new(mem.clone(), desc.clone()),
646                 ) {
647                     (Ok(mut reader), Ok(mut writer)) => {
648                         if let Some(ret_desc) =
649                             self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
650                         {
651                             queue.add_used(mem, ret_desc.index, ret_desc.len);
652                             signal_used = true;
653                         }
654                     }
655                     (_, Err(e)) | (Err(e), _) => {
656                         debug!("invalid descriptor: {}", e);
657                         queue.add_used(mem, desc.index, 0);
658                         signal_used = true;
659                     }
660                 }
661             } else {
662                 let likely_type = mem
663                     .read_obj_from_addr(desc.addr)
664                     .unwrap_or_else(|_| Le32::from(0));
665                 debug!(
666                     "queue bad descriptor index = {} len = {} write = {} type = {}",
667                     desc.index,
668                     desc.len,
669                     desc.is_write_only(),
670                     virtio_gpu_cmd_str(likely_type.to_native())
671                 );
672                 queue.add_used(mem, desc.index, 0);
673                 signal_used = true;
674             }
675         }
676 
677         signal_used
678     }
679 
680     fn process_descriptor(
681         &mut self,
682         mem: &GuestMemory,
683         desc_index: u16,
684         reader: &mut Reader,
685         writer: &mut Writer,
686     ) -> Option<ReturnDescriptor> {
687         let mut resp = Err(GpuResponse::ErrUnspec);
688         let mut gpu_cmd = None;
689         let mut len = 0;
690         match GpuCommand::decode(reader) {
691             Ok(cmd) => {
692                 resp = self.process_gpu_command(mem, cmd, reader);
693                 gpu_cmd = Some(cmd);
694             }
695             Err(e) => debug!("descriptor decode error: {}", e),
696         }
697 
698         let mut gpu_response = match resp {
699             Ok(gpu_response) => gpu_response,
700             Err(gpu_response) => {
701                 debug!("{:?} -> {:?}", gpu_cmd, gpu_response);
702                 gpu_response
703             }
704         };
705 
706         if writer.available_bytes() != 0 {
707             let mut fence_id = 0;
708             let mut ctx_id = 0;
709             let mut flags = 0;
710             let mut ring_idx = 0;
711             if let Some(cmd) = gpu_cmd {
712                 let ctrl_hdr = cmd.ctrl_hdr();
713                 if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
714                     flags = ctrl_hdr.flags.to_native();
715                     fence_id = ctrl_hdr.fence_id.to_native();
716                     ctx_id = ctrl_hdr.ctx_id.to_native();
717                     ring_idx = ctrl_hdr.ring_idx;
718 
719                     let fence = RutabagaFence {
720                         flags,
721                         fence_id,
722                         ctx_id,
723                         ring_idx,
724                     };
725                     gpu_response = match self.virtio_gpu.create_fence(fence) {
726                         Ok(_) => gpu_response,
727                         Err(fence_resp) => {
728                             warn!("create_fence {} -> {:?}", fence_id, fence_resp);
729                             fence_resp
730                         }
731                     };
732                 }
733             }
734 
735             // Prepare the response now, even if it is going to wait until
736             // fence is complete.
737             match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
738                 Ok(l) => len = l,
739                 Err(e) => debug!("ctrl queue response encode error: {}", e),
740             }
741 
742             if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
743                 let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
744                     0 => VirtioGpuRing::Global,
745                     _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
746                 };
747 
748                 // If the fence was already signaled by the time it was created, respond now
749                 // instead of deferring the descriptor with a FenceDescriptor.
750                 let mut fence_state = self.fence_state.lock();
751                 if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
752                     fence_state.descs.push(FenceDescriptor {
753                         ring,
754                         fence_id,
755                         index: desc_index,
756                         len,
757                     });
758 
759                     return None;
760                 }
761             }
762 
763             // No fence (or already completed fence), respond now.
764         }
765         Some(ReturnDescriptor {
766             index: desc_index,
767             len,
768         })
769     }
770 
771     pub fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
772         self.return_cursor_descriptors.pop_front()
773     }
774 
775     pub fn poll(&self) {
776         self.virtio_gpu.poll();
777     }
778 }
779 
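// The worker thread owns the queues and the VirtioGpu state. Its run() loop multiplexes the
// control and cursor queue events, display events, resource bridge requests, the interrupt
// resample event and rutabaga's poll descriptor on a single WaitContext.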
780 struct Worker {
781     interrupt: Arc<Interrupt>,
782     exit_evt: Event,
783     mem: GuestMemory,
784     ctrl_queue: SharedQueueReader,
785     ctrl_evt: Event,
786     cursor_queue: LocalQueueReader,
787     cursor_evt: Event,
788     resource_bridges: Vec<Tube>,
789     kill_evt: Event,
790     state: Frontend,
791 }
792 
793 impl Worker {
794     fn run(&mut self) {
795         #[derive(PollToken)]
796         enum Token {
797             CtrlQueue,
798             CursorQueue,
799             Display,
800             InterruptResample,
801             Kill,
802             ResourceBridge { index: usize },
803             VirtioGpuPoll,
804         }
805 
806         let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
807             (&self.ctrl_evt, Token::CtrlQueue),
808             (&self.cursor_evt, Token::CursorQueue),
809             (&*self.state.display().borrow(), Token::Display),
810             (&self.kill_evt, Token::Kill),
811         ]) {
812             Ok(pc) => pc,
813             Err(e) => {
814                 error!("failed creating WaitContext: {}", e);
815                 return;
816             }
817         };
818         if let Some(resample_evt) = self.interrupt.get_resample_evt() {
819             if wait_ctx
820                 .add(resample_evt, Token::InterruptResample)
821                 .is_err()
822             {
823                 error!("failed creating WaitContext");
824                 return;
825             }
826         }
827 
828         for (index, bridge) in self.resource_bridges.iter().enumerate() {
829             if let Err(e) = wait_ctx.add(bridge, Token::ResourceBridge { index }) {
830                 error!("failed to add resource bridge to WaitContext: {}", e);
831             }
832         }
833 
834         if let Some(poll_desc) = self.state.virtio_gpu.poll_descriptor() {
835             if let Err(e) = wait_ctx.add(&poll_desc, Token::VirtioGpuPoll) {
836                 error!("failed adding poll eventfd to WaitContext: {}", e);
837                 return;
838             }
839         }
840 
841         // TODO(davidriley): The entire main loop processing is somewhat racy and incorrect with
842         // respect to cursor vs control queue processing.  As both currently and originally
843         // written, the control queue is only processed/read from after the cursor queue is
844         // finished, and the entire queue is processed at that time.  The end effect of this
845         // raciness is that control queue descriptors that are issued after cursor descriptors
846         // might be handled first instead of the other way around.  In practice, the cursor queue
847         // isn't used so this isn't a huge issue.
848 
849         // Declare this outside the loop so we don't keep allocating and freeing the vector.
850         let mut process_resource_bridge = Vec::with_capacity(self.resource_bridges.len());
851         'wait: loop {
852             let events = match wait_ctx.wait() {
853                 Ok(v) => v,
854                 Err(e) => {
855                     error!("failed polling for events: {}", e);
856                     break;
857                 }
858             };
859             let mut signal_used_cursor = false;
860             let mut signal_used_ctrl = false;
861             let mut ctrl_available = false;
862 
863             // Clear the old values and re-initialize with false.
864             process_resource_bridge.clear();
865             process_resource_bridge.resize(self.resource_bridges.len(), false);
866 
867             // This display isn't typically used when the virt-wl device is available and it can
868             // lead to hung fds (crbug.com/1027379). Remove it from the wait context if it hangs up.
869             for event in events.iter().filter(|e| e.is_hungup) {
870                 if let Token::Display = event.token {
871                     error!("default display hang-up detected");
872                     let _ = wait_ctx.delete(&*self.state.display().borrow());
873                 }
874             }
875 
876             for event in events.iter().filter(|e| e.is_readable) {
877                 match event.token {
878                     Token::CtrlQueue => {
879                         let _ = self.ctrl_evt.read();
880                         // Set flag that control queue is available to be read, but defer reading
881                         // until the rest of the events are processed.
882                         ctrl_available = true;
883                     }
884                     Token::CursorQueue => {
885                         let _ = self.cursor_evt.read();
886                         if self.state.process_queue(&self.mem, &self.cursor_queue) {
887                             signal_used_cursor = true;
888                         }
889                     }
890                     Token::Display => {
891                         let close_requested = self.state.process_display();
892                         if close_requested {
893                             let _ = self.exit_evt.write(1);
894                         }
895                     }
896                     Token::ResourceBridge { index } => {
897                         process_resource_bridge[index] = true;
898                     }
899                     Token::InterruptResample => {
900                         self.interrupt.interrupt_resample();
901                     }
902                     Token::VirtioGpuPoll => {
903                         self.state.poll();
904                     }
905                     Token::Kill => {
906                         break 'wait;
907                     }
908                 }
909             }
910 
911             // All cursor commands go first because they have higher priority.
912             while let Some(desc) = self.state.return_cursor() {
913                 self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
914                 signal_used_cursor = true;
915             }
916 
917             if ctrl_available && self.state.process_queue(&self.mem, &self.ctrl_queue) {
918                 signal_used_ctrl = true;
919             }
920 
921             // Process the entire control queue before the resource bridge in case a resource is
922             // created or destroyed by the control queue. Processing the resource bridge first may
923             // lead to a race condition.
924             // TODO(davidriley): This is still inherently racy if both the control queue request
925             // and the resource bridge request come in at the same time after the control queue is
926             // processed above and before the corresponding bridge is processed below.
927             for (bridge, &should_process) in
928                 self.resource_bridges.iter().zip(&process_resource_bridge)
929             {
930                 if should_process {
931                     if let Err(e) = self.state.process_resource_bridge(bridge) {
932                         error!("Failed to process resource bridge: {:#}", e);
933                         error!("Removing that resource bridge from the wait context.");
934                         wait_ctx.delete(bridge).unwrap_or_else(|e| {
935                             error!("Failed to remove faulty resource bridge: {:#}", e)
936                         });
937                     }
938                 }
939             }
940 
941             if signal_used_ctrl {
942                 self.ctrl_queue.signal_used(&self.mem);
943             }
944 
945             if signal_used_cursor {
946                 self.cursor_queue.signal_used(&self.mem);
947             }
948         }
949     }
950 }
951 
952 /// Indicates a display backend that the gpu device should try to use.
953 ///
954 /// Several instances of this enum are used in an ordered list to give the gpu device many backends
955 /// to use as fallbacks in case some do not work.
956 #[derive(Clone)]
957 pub enum DisplayBackend {
958     /// Use the wayland backend with the given socket path if given.
959     Wayland(Option<PathBuf>),
960     /// Open a connection to the X server at the given display if given.
961     X(Option<String>),
962     /// Emulate a display without actually displaying it.
963     Stub,
964 }
965 
966 impl DisplayBackend {
967     fn build(&self) -> std::result::Result<GpuDisplay, GpuDisplayError> {
968         match self {
969             DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
970             DisplayBackend::X(display) => GpuDisplay::open_x(display.as_ref()),
971             DisplayBackend::Stub => GpuDisplay::open_stub(),
972         }
973     }
974 }
975 
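// Top-level virtio-gpu device. Several fields are Options because they are handed off exactly
// once, either to initialize_frontend() or to the worker thread spawned in activate().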
976 pub struct Gpu {
977     exit_evt: Event,
978     gpu_device_tube: Option<Tube>,
979     resource_bridges: Vec<Tube>,
980     event_devices: Vec<EventDevice>,
981     kill_evt: Option<Event>,
982     config_event: bool,
983     worker_thread: Option<thread::JoinHandle<()>>,
984     display_backends: Vec<DisplayBackend>,
985     display_params: Vec<GpuDisplayParameters>,
986     rutabaga_builder: Option<RutabagaBuilder>,
987     pci_bar: Option<Alloc>,
988     map_request: Arc<Mutex<Option<ExternalMapping>>>,
989     external_blob: bool,
990     rutabaga_component: RutabagaComponentType,
991     base_features: u64,
992     udmabuf: bool,
993     render_server_fd: Option<SafeDescriptor>,
994 }
995 
996 impl Gpu {
997     pub fn new(
998         exit_evt: Event,
999         gpu_device_tube: Option<Tube>,
1000         resource_bridges: Vec<Tube>,
1001         display_backends: Vec<DisplayBackend>,
1002         gpu_parameters: &GpuParameters,
1003         render_server_fd: Option<SafeDescriptor>,
1004         event_devices: Vec<EventDevice>,
1005         map_request: Arc<Mutex<Option<ExternalMapping>>>,
1006         external_blob: bool,
1007         base_features: u64,
1008         channels: BTreeMap<String, PathBuf>,
1009     ) -> Gpu {
1010         let virglrenderer_flags = VirglRendererFlags::new()
1011             .use_egl(gpu_parameters.renderer_use_egl)
1012             .use_gles(gpu_parameters.renderer_use_gles)
1013             .use_glx(gpu_parameters.renderer_use_glx)
1014             .use_surfaceless(gpu_parameters.renderer_use_surfaceless)
1015             .use_external_blob(external_blob)
1016             .use_venus(gpu_parameters.use_vulkan)
1017             .use_render_server(render_server_fd.is_some())
1018             .use_thread_sync(true)
1019             .use_async_fence_cb(true);
1020         let gfxstream_flags = GfxstreamFlags::new()
1021             .use_egl(gpu_parameters.renderer_use_egl)
1022             .use_gles(gpu_parameters.renderer_use_gles)
1023             .use_glx(gpu_parameters.renderer_use_glx)
1024             .use_surfaceless(gpu_parameters.renderer_use_surfaceless)
1025             .use_guest_angle(gpu_parameters.gfxstream_use_guest_angle)
1026             .use_syncfd(gpu_parameters.gfxstream_use_syncfd)
1027             .use_vulkan(gpu_parameters.use_vulkan)
1028             .use_async_fence_cb(true);
1029 
1030         let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
1031         for (channel_name, path) in &channels {
1032             match &channel_name[..] {
1033                 "" => rutabaga_channels.push(RutabagaChannel {
1034                     base_channel: path.clone(),
1035                     channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
1036                 }),
1037                 "mojo" => rutabaga_channels.push(RutabagaChannel {
1038                     base_channel: path.clone(),
1039                     channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
1040                 }),
1041                 _ => error!("unknown rutabaga channel"),
1042             }
1043         }
1044 
1045         let rutabaga_channels_opt = Some(rutabaga_channels);
1046         let component = match gpu_parameters.mode {
1047             GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
1048             GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
1049             GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
1050         };
1051 
1052         let mut display_width = DEFAULT_DISPLAY_WIDTH;
1053         let mut display_height = DEFAULT_DISPLAY_HEIGHT;
1054         if !gpu_parameters.displays.is_empty() {
1055             display_width = gpu_parameters.displays[0].width;
1056             display_height = gpu_parameters.displays[0].height;
1057         }
1058 
1059         let rutabaga_builder = RutabagaBuilder::new(component)
1060             .set_display_width(display_width)
1061             .set_display_height(display_height)
1062             .set_virglrenderer_flags(virglrenderer_flags)
1063             .set_gfxstream_flags(gfxstream_flags)
1064             .set_rutabaga_channels(rutabaga_channels_opt);
1065 
1066         Gpu {
1067             exit_evt,
1068             gpu_device_tube,
1069             resource_bridges,
1070             event_devices,
1071             config_event: false,
1072             kill_evt: None,
1073             worker_thread: None,
1074             display_backends,
1075             display_params: gpu_parameters.displays.clone(),
1076             rutabaga_builder: Some(rutabaga_builder),
1077             pci_bar: None,
1078             map_request,
1079             external_blob,
1080             rutabaga_component: component,
1081             base_features,
1082             udmabuf: gpu_parameters.udmabuf,
1083             render_server_fd,
1084         }
1085     }
1086 
1087     /// Initializes the internal device state so that it can begin processing virtqueues.
1088     pub fn initialize_frontend(
1089         &mut self,
1090         fence_state: Arc<Mutex<FenceState>>,
1091         fence_handler: RutabagaFenceHandler,
1092     ) -> Option<Frontend> {
1093         let tube = self.gpu_device_tube.take()?;
1094         let pci_bar = self.pci_bar.take()?;
1095         let rutabaga_builder = self.rutabaga_builder.take()?;
1096         let render_server_fd = self.render_server_fd.take();
1097         let event_devices = self.event_devices.split_off(0);
1098 
1099         build(
1100             &self.display_backends,
1101             self.display_params.clone(),
1102             rutabaga_builder,
1103             event_devices,
1104             tube,
1105             pci_bar,
1106             self.map_request.clone(),
1107             self.external_blob,
1108             self.udmabuf,
1109             fence_handler,
1110             render_server_fd,
1111         )
1112         .map(|vgpu| Frontend::new(vgpu, fence_state))
1113     }
1114 
1115     /// Returns the device tube to the main process.
1116     pub fn device_tube(&self) -> Option<&Tube> {
1117         self.gpu_device_tube.as_ref()
1118     }
1119 
1120     /// Sets the device tube to the main process.
1121     pub fn set_device_tube(&mut self, tube: Tube) {
1122         self.gpu_device_tube = Some(tube);
1123     }
1124 
1125     fn get_config(&self) -> virtio_gpu_config {
1126         let mut events_read = 0;
1127         if self.config_event {
1128             events_read |= VIRTIO_GPU_EVENT_DISPLAY;
1129         }
1130 
1131         let num_capsets = match self.rutabaga_component {
1132             RutabagaComponentType::Rutabaga2D => 0,
1133             _ => {
1134                 let mut num_capsets = 0;
1135 
1136                 // Cross-domain (like virtio_wl with llvmpipe) is always available.
1137                 num_capsets += 1;
1138 
1139                 // Three capsets for virgl_renderer
1140                 #[cfg(feature = "virgl_renderer")]
1141                 {
1142                     num_capsets += 3;
1143                 }
1144 
1145                 // One capset for gfxstream
1146                 #[cfg(feature = "gfxstream")]
1147                 {
1148                     num_capsets += 1;
1149                 }
1150 
1151                 num_capsets
1152             }
1153         };
1154 
1155         virtio_gpu_config {
1156             events_read: Le32::from(events_read),
1157             events_clear: Le32::from(0),
1158             num_scanouts: Le32::from(self.display_params.len() as u32),
1159             num_capsets: Le32::from(num_capsets),
1160         }
1161     }
1162 }
1163 
1164 impl Drop for Gpu {
1165     fn drop(&mut self) {
1166         if let Some(kill_evt) = self.kill_evt.take() {
1167             // Ignore the result because there is nothing we can do about it.
1168             let _ = kill_evt.write(1);
1169         }
1170 
1171         if let Some(worker_thread) = self.worker_thread.take() {
1172             let _ = worker_thread.join();
1173         }
1174     }
1175 }
1176 
1177 impl VirtioDevice for Gpu {
1178     fn keep_rds(&self) -> Vec<RawDescriptor> {
1179         let mut keep_rds = Vec::new();
1180         // TODO(davidriley): Remove once virgl has another path to include
1181         // debugging logs.
1182         if cfg!(debug_assertions) {
1183             keep_rds.push(libc::STDOUT_FILENO);
1184             keep_rds.push(libc::STDERR_FILENO);
1185         }
1186 
1187         if let Some(ref gpu_device_tube) = self.gpu_device_tube {
1188             keep_rds.push(gpu_device_tube.as_raw_descriptor());
1189         }
1190 
1191         if let Some(ref render_server_fd) = self.render_server_fd {
1192             keep_rds.push(render_server_fd.as_raw_descriptor());
1193         }
1194 
1195         keep_rds.push(self.exit_evt.as_raw_descriptor());
1196         for bridge in &self.resource_bridges {
1197             keep_rds.push(bridge.as_raw_descriptor());
1198         }
1199 
1200         keep_rds
1201     }
1202 
1203     fn device_type(&self) -> u32 {
1204         TYPE_GPU
1205     }
1206 
1207     fn queue_max_sizes(&self) -> &[u16] {
1208         QUEUE_SIZES
1209     }
1210 
1211     fn features(&self) -> u64 {
1212         let rutabaga_features = match self.rutabaga_component {
1213             RutabagaComponentType::Rutabaga2D => 0,
1214             _ => {
1215                 let mut features_3d = 0;
1216 
1217                 features_3d |= 1 << VIRTIO_GPU_F_VIRGL
1218                     | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1219                     | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
1220                     | 1 << VIRTIO_GPU_F_CONTEXT_INIT
1221                     | 1 << VIRTIO_GPU_F_RESOURCE_SYNC;
1222 
1223                 if self.udmabuf {
1224                     features_3d |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1225                 }
1226 
1227                 features_3d
1228             }
1229         };
1230 
1231         self.base_features | rutabaga_features
1232     }
1233 
1234     fn ack_features(&mut self, value: u64) {
1235         let _ = value;
1236     }
1237 
1238     fn read_config(&self, offset: u64, data: &mut [u8]) {
1239         copy_config(data, 0, self.get_config().as_slice(), offset);
1240     }
1241 
1242     fn write_config(&mut self, offset: u64, data: &[u8]) {
1243         let mut cfg = self.get_config();
1244         copy_config(cfg.as_mut_slice(), offset, data, 0);
1245         if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
1246             self.config_event = false;
1247         }
1248     }
1249 
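    // queues[0]/queue_evts[0] are the control queue and queues[1]/queue_evts[1] the cursor
    // queue, matching QUEUE_SIZES. Everything the worker needs is moved into the spawned
    // "virtio_gpu" thread.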
1250     fn activate(
1251         &mut self,
1252         mem: GuestMemory,
1253         interrupt: Interrupt,
1254         mut queues: Vec<Queue>,
1255         mut queue_evts: Vec<Event>,
1256     ) {
1257         if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
1258             return;
1259         }
1260 
1261         let exit_evt = match self.exit_evt.try_clone() {
1262             Ok(e) => e,
1263             Err(e) => {
1264                 error!("error cloning exit event: {}", e);
1265                 return;
1266             }
1267         };
1268 
1269         let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
1270             Ok(v) => v,
1271             Err(e) => {
1272                 error!("error creating kill Event pair: {}", e);
1273                 return;
1274             }
1275         };
1276         self.kill_evt = Some(self_kill_evt);
1277 
1278         let resource_bridges = mem::take(&mut self.resource_bridges);
1279 
1280         let irq = Arc::new(interrupt);
1281         let ctrl_queue = SharedQueueReader::new(queues.remove(0), &irq);
1282         let ctrl_evt = queue_evts.remove(0);
1283         let cursor_queue = LocalQueueReader::new(queues.remove(0), &irq);
1284         let cursor_evt = queue_evts.remove(0);
1285         let display_backends = self.display_backends.clone();
1286         let display_params = self.display_params.clone();
1287         let event_devices = self.event_devices.split_off(0);
1288         let map_request = Arc::clone(&self.map_request);
1289         let external_blob = self.external_blob;
1290         let udmabuf = self.udmabuf;
1291         let fence_state = Arc::new(Mutex::new(Default::default()));
1292         let render_server_fd = self.render_server_fd.take();
1293         if let (Some(gpu_device_tube), Some(pci_bar), Some(rutabaga_builder)) = (
1294             self.gpu_device_tube.take(),
1295             self.pci_bar.take(),
1296             self.rutabaga_builder.take(),
1297         ) {
1298             let worker_result =
1299                 thread::Builder::new()
1300                     .name("virtio_gpu".to_string())
1301                     .spawn(move || {
1302                         let fence_handler = create_fence_handler(
1303                             mem.clone(),
1304                             ctrl_queue.clone(),
1305                             fence_state.clone(),
1306                         );
1307 
1308                         let virtio_gpu = match build(
1309                             &display_backends,
1310                             display_params,
1311                             rutabaga_builder,
1312                             event_devices,
1313                             gpu_device_tube,
1314                             pci_bar,
1315                             map_request,
1316                             external_blob,
1317                             udmabuf,
1318                             fence_handler,
1319                             render_server_fd,
1320                         ) {
1321                             Some(backend) => backend,
1322                             None => return,
1323                         };
1324 
1325                         Worker {
1326                             interrupt: irq,
1327                             exit_evt,
1328                             mem,
1329                             ctrl_queue: ctrl_queue.clone(),
1330                             ctrl_evt,
1331                             cursor_queue,
1332                             cursor_evt,
1333                             resource_bridges,
1334                             kill_evt,
1335                             state: Frontend::new(virtio_gpu, fence_state),
1336                         }
1337                         .run()
1338                     });
1339 
1340             match worker_result {
1341                 Err(e) => {
1342                     error!("failed to spawn virtio_gpu worker: {}", e);
1343                     return;
1344                 }
1345                 Ok(join_handle) => {
1346                     self.worker_thread = Some(join_handle);
1347                 }
1348             }
1349         }
1350     }
1351 
1352     // Requires one BAR for mapping 3D buffers.
1353     fn get_device_bars(&mut self, address: PciAddress) -> Vec<PciBarConfiguration> {
1354         self.pci_bar = Some(Alloc::PciBar {
1355             bus: address.bus,
1356             dev: address.dev,
1357             func: address.func,
1358             bar: GPU_BAR_NUM,
1359         });
1360         vec![PciBarConfiguration::new(
1361             GPU_BAR_NUM as usize,
1362             GPU_BAR_SIZE,
1363             PciBarRegionType::Memory64BitRegion,
1364             PciBarPrefetchable::NotPrefetchable,
1365         )]
1366     }
1367 
1368     fn get_device_caps(&self) -> Vec<Box<dyn PciCapability>> {
1369         vec![Box::new(VirtioPciShmCap::new(
1370             PciCapabilityType::SharedMemoryConfig,
1371             GPU_BAR_NUM,
1372             GPU_BAR_OFFSET,
1373             GPU_BAR_SIZE,
1374             VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
1375         ))]
1376     }
1377 }
1378