1 // Copyright 2018 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 mod protocol;
6 mod udmabuf;
7 mod udmabuf_bindings;
8 mod virtio_gpu;
9
10 use std::cell::RefCell;
11 use std::collections::{BTreeMap, VecDeque};
12 use std::convert::TryFrom;
13 use std::i64;
14 use std::io::Read;
15 use std::mem::{self, size_of};
16 use std::num::NonZeroU8;
17 use std::path::PathBuf;
18 use std::rc::Rc;
19 use std::sync::Arc;
20 use std::thread;
21 use std::time::Duration;
22
23 use base::{
24 debug, error, warn, AsRawDescriptor, AsRawDescriptors, Event, ExternalMapping, PollToken,
25 RawDescriptor, Tube, WaitContext,
26 };
27
28 use data_model::*;
29
30 pub use gpu_display::EventDevice;
31 use gpu_display::*;
32 use rutabaga_gfx::*;
33
34 use resources::Alloc;
35
36 use sync::Mutex;
37 use vm_memory::{GuestAddress, GuestMemory};
38
39 use super::{
40 copy_config, resource_bridge::*, DescriptorChain, Interrupt, Queue, Reader,
41 SignalableInterrupt, VirtioDevice, Writer, TYPE_GPU,
42 };
43
44 use super::{PciCapabilityType, VirtioPciShmCap};
45
46 use self::protocol::*;
47 use self::virtio_gpu::VirtioGpu;
48
49 use crate::pci::{
50 PciAddress, PciBarConfiguration, PciBarPrefetchable, PciBarRegionType, PciCapability,
51 };
52
53 pub const DEFAULT_DISPLAY_WIDTH: u32 = 1280;
54 pub const DEFAULT_DISPLAY_HEIGHT: u32 = 1024;
55
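/// Rendering backend used by the virtio-gpu device.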
56 #[derive(Copy, Clone, Debug, PartialEq)]
57 pub enum GpuMode {
58 Mode2D,
59 ModeVirglRenderer,
60 ModeGfxstream,
61 }
62
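/// Configuration parameters for the virtio-gpu device.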
63 #[derive(Debug)]
64 pub struct GpuParameters {
65 pub display_width: u32,
66 pub display_height: u32,
67 pub renderer_use_egl: bool,
68 pub renderer_use_gles: bool,
69 pub renderer_use_glx: bool,
70 pub renderer_use_surfaceless: bool,
71 pub gfxstream_use_guest_angle: bool,
72 pub gfxstream_use_syncfd: bool,
73 pub use_vulkan: bool,
74 pub udmabuf: bool,
75 pub mode: GpuMode,
76 pub cache_path: Option<String>,
77 pub cache_size: Option<String>,
78 }
79
80 // First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
81 // there to be fewer of.
82 const QUEUE_SIZES: &[u16] = &[256, 16];
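// How long the worker waits before polling again for fence completion while fences are
// outstanding.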
83 const FENCE_POLL_MS: u64 = 1;
84
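// PCI BAR 4 is reserved as a 256 MiB (1 << 28 byte) region and exposed to the guest as the
// host-visible shared memory capability used for mapping blob resources.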
85 const GPU_BAR_NUM: u8 = 4;
86 const GPU_BAR_OFFSET: u64 = 0;
87 const GPU_BAR_SIZE: u64 = 1 << 28;
88
89 impl Default for GpuParameters {
    fn default() -> Self {
91 GpuParameters {
92 display_width: DEFAULT_DISPLAY_WIDTH,
93 display_height: DEFAULT_DISPLAY_HEIGHT,
94 renderer_use_egl: true,
95 renderer_use_gles: true,
96 renderer_use_glx: false,
97 renderer_use_surfaceless: true,
98 gfxstream_use_guest_angle: false,
99 gfxstream_use_syncfd: true,
100 use_vulkan: false,
101 mode: GpuMode::ModeVirglRenderer,
102 cache_path: None,
103 cache_size: None,
104 udmabuf: false,
105 }
106 }
107 }
108
109 #[derive(Copy, Clone, Debug)]
110 pub struct VirtioScanoutBlobData {
111 pub width: u32,
112 pub height: u32,
113 pub drm_format: DrmFormat,
114 pub strides: [u32; 4],
115 pub offsets: [u32; 4],
116 }
117
118 /// Initializes the virtio_gpu state tracker.
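///
/// Each entry of `possible_displays` is tried in order; the first backend that opens
/// successfully is used, and `None` is returned if none of them can be opened.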
fn build(
120 possible_displays: &[DisplayBackend],
121 display_width: u32,
122 display_height: u32,
123 rutabaga_builder: RutabagaBuilder,
124 event_devices: Vec<EventDevice>,
125 gpu_device_tube: Tube,
126 pci_bar: Alloc,
127 map_request: Arc<Mutex<Option<ExternalMapping>>>,
128 external_blob: bool,
129 udmabuf: bool,
130 ) -> Option<VirtioGpu> {
131 let mut display_opt = None;
132 for display in possible_displays {
133 match display.build() {
134 Ok(c) => {
135 display_opt = Some(c);
136 break;
137 }
138 Err(e) => error!("failed to open display: {}", e),
139 };
140 }
141
142 let display = match display_opt {
143 Some(d) => d,
144 None => {
145 error!("failed to open any displays");
146 return None;
147 }
148 };
149
150 VirtioGpu::new(
151 display,
152 display_width,
153 display_height,
154 rutabaga_builder,
155 event_devices,
156 gpu_device_tube,
157 pci_bar,
158 map_request,
159 external_blob,
160 udmabuf,
161 )
162 }
163
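/// A descriptor that has been processed and is ready to be returned to the guest, along with
/// the number of bytes written into it.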
164 struct ReturnDescriptor {
165 index: u16,
166 len: u32,
167 }
168
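/// A descriptor whose completion is deferred until the fence described by `desc_fence` signals.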
169 struct FenceDescriptor {
170 desc_fence: RutabagaFenceData,
171 index: u16,
172 len: u32,
173 }
174
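/// Returns true if the two fences are on the same timeline: either both are on the global
/// timeline, or they share the same 3D context and fence context index.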
fn fence_ctx_equal(desc_fence: &RutabagaFenceData, completed: &RutabagaFenceData) -> bool {
176 let desc_fence_ctx = desc_fence.flags & VIRTIO_GPU_FLAG_INFO_FENCE_CTX_IDX != 0;
177 let completed_fence_ctx = completed.flags & VIRTIO_GPU_FLAG_INFO_FENCE_CTX_IDX != 0;
178
    // Both fences are on the global timeline -- the only case with an upstream kernel. The rest
    // of the logic is for the per-fence-context prototype.
181 if !completed_fence_ctx && !desc_fence_ctx {
182 return true;
183 }
184
185 // One fence is on global timeline
186 if desc_fence_ctx != completed_fence_ctx {
187 return false;
188 }
189
190 // Different 3D contexts
191 if desc_fence.ctx_id != completed.ctx_id {
192 return false;
193 }
194
195 // Different fence contexts with same 3D context
196 if desc_fence.fence_ctx_idx != completed.fence_ctx_idx {
197 return false;
198 }
199
200 true
201 }
202
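/// Processes virtio-gpu commands on behalf of the worker thread and tracks descriptors that
/// are waiting on fences before they can be returned.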
203 struct Frontend {
204 return_ctrl_descriptors: VecDeque<ReturnDescriptor>,
205 return_cursor_descriptors: VecDeque<ReturnDescriptor>,
206 fence_descriptors: Vec<FenceDescriptor>,
207 virtio_gpu: VirtioGpu,
208 }
209
210 impl Frontend {
    fn new(virtio_gpu: VirtioGpu) -> Frontend {
212 Frontend {
213 return_ctrl_descriptors: Default::default(),
214 return_cursor_descriptors: Default::default(),
215 fence_descriptors: Default::default(),
216 virtio_gpu,
217 }
218 }
219
    fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
221 self.virtio_gpu.display()
222 }
223
    fn process_display(&mut self) -> bool {
225 self.virtio_gpu.process_display()
226 }
227
    fn process_resource_bridge(&mut self, resource_bridge: &Tube) {
229 let response = match resource_bridge.recv() {
230 Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
231 Ok(ResourceRequest::GetFence { seqno }) => {
                // The seqno originated from self.virtio_gpu, so
                // it should fit in a u32.
234 match u32::try_from(seqno) {
235 Ok(fence_id) => self.virtio_gpu.export_fence(fence_id),
236 Err(_) => ResourceResponse::Invalid,
237 }
238 }
239 Err(e) => {
240 error!("error receiving resource bridge request: {}", e);
241 return;
242 }
243 };
244
245 if let Err(e) = resource_bridge.send(&response) {
246 error!("error sending resource bridge request: {}", e);
247 }
248 }
249
    fn process_gpu_command(
251 &mut self,
252 mem: &GuestMemory,
253 cmd: GpuCommand,
254 reader: &mut Reader,
255 ) -> VirtioGpuResult {
256 self.virtio_gpu.force_ctx_0();
257
258 match cmd {
259 GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
260 self.virtio_gpu.display_info().to_vec(),
261 )),
262 GpuCommand::ResourceCreate2d(info) => {
263 let resource_id = info.resource_id.to_native();
264
265 let resource_create_3d = ResourceCreate3D {
266 target: RUTABAGA_PIPE_TEXTURE_2D,
267 format: info.format.to_native(),
268 bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
269 width: info.width.to_native(),
270 height: info.height.to_native(),
271 depth: 1,
272 array_size: 1,
273 last_level: 0,
274 nr_samples: 0,
275 flags: 0,
276 };
277
278 self.virtio_gpu
279 .resource_create_3d(resource_id, resource_create_3d)
280 }
281 GpuCommand::ResourceUnref(info) => {
282 self.virtio_gpu.unref_resource(info.resource_id.to_native())
283 }
284 GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
285 info.scanout_id.to_native(),
286 info.resource_id.to_native(),
287 None,
288 ),
289 GpuCommand::ResourceFlush(info) => {
290 self.virtio_gpu.flush_resource(info.resource_id.to_native())
291 }
292 GpuCommand::TransferToHost2d(info) => {
293 let resource_id = info.resource_id.to_native();
294 let transfer = Transfer3D::new_2d(
295 info.r.x.to_native(),
296 info.r.y.to_native(),
297 info.r.width.to_native(),
298 info.r.height.to_native(),
299 );
300 self.virtio_gpu.transfer_write(0, resource_id, transfer)
301 }
302 GpuCommand::ResourceAttachBacking(info) => {
303 let available_bytes = reader.available_bytes();
304 if available_bytes != 0 {
305 let entry_count = info.nr_entries.to_native() as usize;
306 let mut vecs = Vec::with_capacity(entry_count);
307 for _ in 0..entry_count {
308 match reader.read_obj::<virtio_gpu_mem_entry>() {
309 Ok(entry) => {
310 let addr = GuestAddress(entry.addr.to_native());
311 let len = entry.length.to_native() as usize;
312 vecs.push((addr, len))
313 }
314 Err(_) => return Err(GpuResponse::ErrUnspec),
315 }
316 }
317 self.virtio_gpu
318 .attach_backing(info.resource_id.to_native(), mem, vecs)
319 } else {
320 error!("missing data for command {:?}", cmd);
321 Err(GpuResponse::ErrUnspec)
322 }
323 }
324 GpuCommand::ResourceDetachBacking(info) => {
325 self.virtio_gpu.detach_backing(info.resource_id.to_native())
326 }
327 GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
328 info.resource_id.to_native(),
329 info.pos.x.into(),
330 info.pos.y.into(),
331 ),
332 GpuCommand::MoveCursor(info) => self
333 .virtio_gpu
334 .move_cursor(info.pos.x.into(), info.pos.y.into()),
335 GpuCommand::ResourceAssignUuid(info) => {
336 let resource_id = info.resource_id.to_native();
337 self.virtio_gpu.resource_assign_uuid(resource_id)
338 }
339 GpuCommand::GetCapsetInfo(info) => self
340 .virtio_gpu
341 .get_capset_info(info.capset_index.to_native()),
342 GpuCommand::GetCapset(info) => self
343 .virtio_gpu
344 .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
345 GpuCommand::CtxCreate(info) => self
346 .virtio_gpu
347 .create_context(info.hdr.ctx_id.to_native(), info.context_init.to_native()),
348 GpuCommand::CtxDestroy(info) => {
349 self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
350 }
351 GpuCommand::CtxAttachResource(info) => self
352 .virtio_gpu
353 .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
354 GpuCommand::CtxDetachResource(info) => self
355 .virtio_gpu
356 .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
357 GpuCommand::ResourceCreate3d(info) => {
358 let resource_id = info.resource_id.to_native();
359 let resource_create_3d = ResourceCreate3D {
360 target: info.target.to_native(),
361 format: info.format.to_native(),
362 bind: info.bind.to_native(),
363 width: info.width.to_native(),
364 height: info.height.to_native(),
365 depth: info.depth.to_native(),
366 array_size: info.array_size.to_native(),
367 last_level: info.last_level.to_native(),
368 nr_samples: info.nr_samples.to_native(),
369 flags: info.flags.to_native(),
370 };
371
372 self.virtio_gpu
373 .resource_create_3d(resource_id, resource_create_3d)
374 }
375 GpuCommand::TransferToHost3d(info) => {
376 let ctx_id = info.hdr.ctx_id.to_native();
377 let resource_id = info.resource_id.to_native();
378
379 let transfer = Transfer3D {
380 x: info.box_.x.to_native(),
381 y: info.box_.y.to_native(),
382 z: info.box_.z.to_native(),
383 w: info.box_.w.to_native(),
384 h: info.box_.h.to_native(),
385 d: info.box_.d.to_native(),
386 level: info.level.to_native(),
387 stride: info.stride.to_native(),
388 layer_stride: info.layer_stride.to_native(),
389 offset: info.offset.to_native(),
390 };
391
392 self.virtio_gpu
393 .transfer_write(ctx_id, resource_id, transfer)
394 }
395 GpuCommand::TransferFromHost3d(info) => {
396 let ctx_id = info.hdr.ctx_id.to_native();
397 let resource_id = info.resource_id.to_native();
398
399 let transfer = Transfer3D {
400 x: info.box_.x.to_native(),
401 y: info.box_.y.to_native(),
402 z: info.box_.z.to_native(),
403 w: info.box_.w.to_native(),
404 h: info.box_.h.to_native(),
405 d: info.box_.d.to_native(),
406 level: info.level.to_native(),
407 stride: info.stride.to_native(),
408 layer_stride: info.layer_stride.to_native(),
409 offset: info.offset.to_native(),
410 };
411
412 self.virtio_gpu
413 .transfer_read(ctx_id, resource_id, transfer, None)
414 }
415 GpuCommand::CmdSubmit3d(info) => {
416 if reader.available_bytes() != 0 {
417 let cmd_size = info.size.to_native() as usize;
418 let mut cmd_buf = vec![0; cmd_size];
419 if reader.read_exact(&mut cmd_buf[..]).is_ok() {
420 self.virtio_gpu
421 .submit_command(info.hdr.ctx_id.to_native(), &mut cmd_buf[..])
422 } else {
423 Err(GpuResponse::ErrInvalidParameter)
424 }
425 } else {
426 // Silently accept empty command buffers to allow for
427 // benchmarking.
428 Ok(GpuResponse::OkNoData)
429 }
430 }
431 GpuCommand::ResourceCreateBlob(info) => {
432 let resource_id = info.resource_id.to_native();
433 let ctx_id = info.hdr.ctx_id.to_native();
434
435 let resource_create_blob = ResourceCreateBlob {
436 blob_mem: info.blob_mem.to_native(),
437 blob_flags: info.blob_flags.to_native(),
438 blob_id: info.blob_id.to_native(),
439 size: info.size.to_native(),
440 };
441
442 let entry_count = info.nr_entries.to_native();
443 if entry_count > VIRTIO_GPU_MAX_IOVEC_ENTRIES
444 || (reader.available_bytes() == 0 && entry_count > 0)
445 {
446 return Err(GpuResponse::ErrUnspec);
447 }
448
449 let mut vecs = Vec::with_capacity(entry_count as usize);
450 for _ in 0..entry_count {
451 match reader.read_obj::<virtio_gpu_mem_entry>() {
452 Ok(entry) => {
453 let addr = GuestAddress(entry.addr.to_native());
454 let len = entry.length.to_native() as usize;
455 vecs.push((addr, len))
456 }
457 Err(_) => return Err(GpuResponse::ErrUnspec),
458 }
459 }
460
461 self.virtio_gpu.resource_create_blob(
462 ctx_id,
463 resource_id,
464 resource_create_blob,
465 vecs,
466 mem,
467 )
468 }
469 GpuCommand::SetScanoutBlob(info) => {
470 let scanout_id = info.scanout_id.to_native();
471 let resource_id = info.resource_id.to_native();
472 let virtio_gpu_format = info.format.to_native();
473 let width = info.width.to_native();
                let height = info.height.to_native();
475 let mut strides: [u32; 4] = [0; 4];
476 let mut offsets: [u32; 4] = [0; 4];
477
478 // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
479 // the following may have to change too.
480 let drm_format = match virtio_gpu_format {
481 VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
482 VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
483 _ => {
484 error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
485 return Err(GpuResponse::ErrUnspec);
486 }
487 };
488
489 for plane_index in 0..PLANE_INFO_MAX_COUNT {
490 offsets[plane_index] = info.offsets[plane_index].to_native();
491 strides[plane_index] = info.strides[plane_index].to_native();
492 }
493
494 let scanout = VirtioScanoutBlobData {
495 width,
496 height,
497 drm_format,
498 strides,
499 offsets,
500 };
501
502 self.virtio_gpu
503 .set_scanout(scanout_id, resource_id, Some(scanout))
504 }
505 GpuCommand::ResourceMapBlob(info) => {
506 let resource_id = info.resource_id.to_native();
507 let offset = info.offset.to_native();
508 self.virtio_gpu.resource_map_blob(resource_id, offset)
509 }
510 GpuCommand::ResourceUnmapBlob(info) => {
511 let resource_id = info.resource_id.to_native();
512 self.virtio_gpu.resource_unmap_blob(resource_id)
513 }
514 }
515 }
516
    fn validate_desc(desc: &DescriptorChain) -> bool {
518 desc.len as usize >= size_of::<virtio_gpu_ctrl_hdr>() && !desc.is_write_only()
519 }
520
    fn process_queue(&mut self, mem: &GuestMemory, queue: &mut Queue) -> bool {
522 let mut signal_used = false;
523 while let Some(desc) = queue.pop(mem) {
524 if Frontend::validate_desc(&desc) {
525 match (
526 Reader::new(mem.clone(), desc.clone()),
527 Writer::new(mem.clone(), desc.clone()),
528 ) {
529 (Ok(mut reader), Ok(mut writer)) => {
530 if let Some(ret_desc) =
531 self.process_descriptor(mem, desc.index, &mut reader, &mut writer)
532 {
533 queue.add_used(&mem, ret_desc.index, ret_desc.len);
534 signal_used = true;
535 }
536 }
537 (_, Err(e)) | (Err(e), _) => {
538 debug!("invalid descriptor: {}", e);
539 queue.add_used(&mem, desc.index, 0);
540 signal_used = true;
541 }
542 }
543 } else {
544 let likely_type = mem
545 .read_obj_from_addr(desc.addr)
546 .unwrap_or_else(|_| Le32::from(0));
547 debug!(
548 "queue bad descriptor index = {} len = {} write = {} type = {}",
549 desc.index,
550 desc.len,
551 desc.is_write_only(),
552 virtio_gpu_cmd_str(likely_type.to_native())
553 );
554 queue.add_used(&mem, desc.index, 0);
555 signal_used = true;
556 }
557 }
558
559 signal_used
560 }
561
    fn process_descriptor(
563 &mut self,
564 mem: &GuestMemory,
565 desc_index: u16,
566 reader: &mut Reader,
567 writer: &mut Writer,
568 ) -> Option<ReturnDescriptor> {
569 let mut resp = Err(GpuResponse::ErrUnspec);
570 let mut gpu_cmd = None;
571 let mut len = 0;
572 match GpuCommand::decode(reader) {
573 Ok(cmd) => {
574 resp = self.process_gpu_command(mem, cmd, reader);
575 gpu_cmd = Some(cmd);
576 }
577 Err(e) => debug!("descriptor decode error: {}", e),
578 }
579
580 let mut gpu_response = match resp {
581 Ok(gpu_response) => gpu_response,
582 Err(gpu_response) => {
583 debug!("{:?} -> {:?}", gpu_cmd, gpu_response);
584 gpu_response
585 }
586 };
587
588 if writer.available_bytes() != 0 {
589 let mut fence_id = 0;
590 let mut ctx_id = 0;
591 let mut flags = 0;
592 let mut info = 0;
593 if let Some(cmd) = gpu_cmd {
594 let ctrl_hdr = cmd.ctrl_hdr();
595 if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
596 flags = ctrl_hdr.flags.to_native();
597 fence_id = ctrl_hdr.fence_id.to_native();
598 ctx_id = ctrl_hdr.ctx_id.to_native();
                    // The fence context index is the only value currently carried in the hdr's
                    // info field.
600 info = ctrl_hdr.info.to_native();
601
602 let fence_data = RutabagaFenceData {
603 flags,
604 fence_id,
605 ctx_id,
606 fence_ctx_idx: info,
607 };
608 gpu_response = match self.virtio_gpu.create_fence(fence_data) {
609 Ok(_) => gpu_response,
610 Err(fence_resp) => {
611 warn!("create_fence {} -> {:?}", fence_id, fence_resp);
612 fence_resp
613 }
614 };
615 }
616 }
617
            // Prepare the response now, even if it is going to wait until the
            // fence is complete.
620 match gpu_response.encode(flags, fence_id, ctx_id, info, writer) {
621 Ok(l) => len = l,
622 Err(e) => debug!("ctrl queue response encode error: {}", e),
623 }
624
625 if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
626 self.fence_descriptors.push(FenceDescriptor {
627 desc_fence: RutabagaFenceData {
628 flags,
629 fence_id,
630 ctx_id,
631 fence_ctx_idx: info,
632 },
633 index: desc_index,
634 len,
635 });
636
637 return None;
638 }
639
640 // No fence, respond now.
641 }
642 Some(ReturnDescriptor {
643 index: desc_index,
644 len,
645 })
646 }
647
    fn return_cursor(&mut self) -> Option<ReturnDescriptor> {
649 self.return_cursor_descriptors.pop_front()
650 }
651
    fn return_ctrl(&mut self) -> Option<ReturnDescriptor> {
653 self.return_ctrl_descriptors.pop_front()
654 }
655
    fn fence_poll(&mut self) {
657 let completed_fences = self.virtio_gpu.fence_poll();
658 let return_descs = &mut self.return_ctrl_descriptors;
659
660 self.fence_descriptors.retain(|f_desc| {
661 for completed in &completed_fences {
662 if fence_ctx_equal(&f_desc.desc_fence, completed)
663 && f_desc.desc_fence.fence_id <= completed.fence_id
664 {
665 return_descs.push_back(ReturnDescriptor {
666 index: f_desc.index,
667 len: f_desc.len,
668 });
669 return false;
670 }
671 }
672 true
673 })
674 }
675 }
676
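/// Event-loop state for the virtio-gpu worker thread: waits on queue, display, kill, and
/// resource bridge events and dispatches them to the `Frontend`.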
677 struct Worker {
678 interrupt: Interrupt,
679 exit_evt: Event,
680 mem: GuestMemory,
681 ctrl_queue: Queue,
682 ctrl_evt: Event,
683 cursor_queue: Queue,
684 cursor_evt: Event,
685 resource_bridges: Vec<Tube>,
686 kill_evt: Event,
687 state: Frontend,
688 }
689
690 impl Worker {
    fn run(&mut self) {
692 #[derive(PollToken)]
693 enum Token {
694 CtrlQueue,
695 CursorQueue,
696 Display,
697 InterruptResample,
698 Kill,
699 ResourceBridge { index: usize },
700 }
701
702 let wait_ctx: WaitContext<Token> = match WaitContext::build_with(&[
703 (&self.ctrl_evt, Token::CtrlQueue),
704 (&self.cursor_evt, Token::CursorQueue),
705 (&*self.state.display().borrow(), Token::Display),
706 (&self.kill_evt, Token::Kill),
707 ]) {
708 Ok(pc) => pc,
709 Err(e) => {
710 error!("failed creating WaitContext: {}", e);
711 return;
712 }
713 };
714 if let Some(resample_evt) = self.interrupt.get_resample_evt() {
715 if wait_ctx
716 .add(resample_evt, Token::InterruptResample)
717 .is_err()
718 {
719 error!("failed creating WaitContext");
720 return;
721 }
722 }
723
724 for (index, bridge) in self.resource_bridges.iter().enumerate() {
725 if let Err(e) = wait_ctx.add(bridge, Token::ResourceBridge { index }) {
726 error!("failed to add resource bridge to WaitContext: {}", e);
727 }
728 }
729
        // TODO(davidriley): The entire main loop processing is somewhat racy and incorrect with
        // respect to cursor vs. control queue processing. As both currently and originally
        // written, the control queue is only processed/read from after the cursor queue is
        // finished, but the entire queue will be processed at that time. The net effect of this
        // raciness is that control queue descriptors issued after cursor descriptors might be
        // handled first instead of the other way around. In practice, the cursor queue isn't
        // used, so this isn't a huge issue.
737
738 // Declare this outside the loop so we don't keep allocating and freeing the vector.
739 let mut process_resource_bridge = Vec::with_capacity(self.resource_bridges.len());
740 'wait: loop {
741 // If there are outstanding fences, wake up early to poll them.
742 let duration = if !self.state.fence_descriptors.is_empty() {
743 Duration::from_millis(FENCE_POLL_MS)
744 } else {
745 Duration::new(i64::MAX as u64, 0)
746 };
747
748 let events = match wait_ctx.wait_timeout(duration) {
749 Ok(v) => v,
750 Err(e) => {
751 error!("failed polling for events: {}", e);
752 break;
753 }
754 };
755 let mut signal_used_cursor = false;
756 let mut signal_used_ctrl = false;
757 let mut ctrl_available = false;
758
759 // Clear the old values and re-initialize with false.
760 process_resource_bridge.clear();
761 process_resource_bridge.resize(self.resource_bridges.len(), false);
762
763 // This display isn't typically used when the virt-wl device is available and it can
764 // lead to hung fds (crbug.com/1027379). Disable if it's hung.
765 for event in events.iter().filter(|e| e.is_hungup) {
766 if let Token::Display = event.token {
767 error!("default display hang-up detected");
768 let _ = wait_ctx.delete(&*self.state.display().borrow());
769 }
770 }
771
772 for event in events.iter().filter(|e| e.is_readable) {
773 match event.token {
774 Token::CtrlQueue => {
775 let _ = self.ctrl_evt.read();
776 // Set flag that control queue is available to be read, but defer reading
777 // until rest of the events are processed.
778 ctrl_available = true;
779 }
780 Token::CursorQueue => {
781 let _ = self.cursor_evt.read();
782 if self.state.process_queue(&self.mem, &mut self.cursor_queue) {
783 signal_used_cursor = true;
784 }
785 }
786 Token::Display => {
787 let close_requested = self.state.process_display();
788 if close_requested {
789 let _ = self.exit_evt.write(1);
790 }
791 }
792 Token::ResourceBridge { index } => {
793 process_resource_bridge[index] = true;
794 }
795 Token::InterruptResample => {
796 self.interrupt.interrupt_resample();
797 }
798 Token::Kill => {
799 break 'wait;
800 }
801 }
802 }
803
804 // All cursor commands go first because they have higher priority.
805 while let Some(desc) = self.state.return_cursor() {
806 self.cursor_queue.add_used(&self.mem, desc.index, desc.len);
807 signal_used_cursor = true;
808 }
809
810 if ctrl_available && self.state.process_queue(&self.mem, &mut self.ctrl_queue) {
811 signal_used_ctrl = true;
812 }
813
814 self.state.fence_poll();
815
816 while let Some(desc) = self.state.return_ctrl() {
817 self.ctrl_queue.add_used(&self.mem, desc.index, desc.len);
818 signal_used_ctrl = true;
819 }
820
821 // Process the entire control queue before the resource bridge in case a resource is
822 // created or destroyed by the control queue. Processing the resource bridge first may
823 // lead to a race condition.
            // TODO(davidriley): This is still inherently racy if both the control queue request
            // and the resource bridge request come in at the same time after the control queue is
            // processed above and before the corresponding bridge is processed below.
827 for (bridge, &should_process) in
828 self.resource_bridges.iter().zip(&process_resource_bridge)
829 {
830 if should_process {
831 self.state.process_resource_bridge(bridge);
832 }
833 }
834
835 if signal_used_ctrl {
836 self.interrupt.signal_used_queue(self.ctrl_queue.vector);
837 }
838
839 if signal_used_cursor {
840 self.interrupt.signal_used_queue(self.cursor_queue.vector);
841 }
842 }
843 }
844 }
845
846 /// Indicates a backend that should be tried for the gpu to use for display.
847 ///
848 /// Several instances of this enum are used in an ordered list to give the gpu device many backends
849 /// to use as fallbacks in case some do not work.
850 #[derive(Clone)]
851 pub enum DisplayBackend {
852 /// Use the wayland backend with the given socket path if given.
853 Wayland(Option<PathBuf>),
854 /// Open a connection to the X server at the given display if given.
855 X(Option<String>),
856 /// Emulate a display without actually displaying it.
857 Stub,
858 }
859
860 impl DisplayBackend {
    fn build(&self) -> std::result::Result<GpuDisplay, GpuDisplayError> {
862 match self {
863 DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
864 DisplayBackend::X(display) => GpuDisplay::open_x(display.as_ref()),
865 DisplayBackend::Stub => GpuDisplay::open_stub(),
866 }
867 }
868 }
869
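/// The virtio-gpu device. Holds the configuration and resources needed to spawn the worker
/// thread when the device is activated.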
870 pub struct Gpu {
871 exit_evt: Event,
872 gpu_device_tube: Option<Tube>,
873 resource_bridges: Vec<Tube>,
874 event_devices: Vec<EventDevice>,
875 kill_evt: Option<Event>,
876 config_event: bool,
877 worker_thread: Option<thread::JoinHandle<()>>,
878 num_scanouts: NonZeroU8,
879 display_backends: Vec<DisplayBackend>,
880 display_width: u32,
881 display_height: u32,
882 rutabaga_builder: Option<RutabagaBuilder>,
883 pci_bar: Option<Alloc>,
884 map_request: Arc<Mutex<Option<ExternalMapping>>>,
885 external_blob: bool,
886 rutabaga_component: RutabagaComponentType,
887 base_features: u64,
888 mem: GuestMemory,
889 udmabuf: bool,
890 }
891
892 impl Gpu {
    pub fn new(
894 exit_evt: Event,
895 gpu_device_tube: Option<Tube>,
896 num_scanouts: NonZeroU8,
897 resource_bridges: Vec<Tube>,
898 display_backends: Vec<DisplayBackend>,
899 gpu_parameters: &GpuParameters,
900 event_devices: Vec<EventDevice>,
901 map_request: Arc<Mutex<Option<ExternalMapping>>>,
902 external_blob: bool,
903 base_features: u64,
904 channels: BTreeMap<String, PathBuf>,
905 mem: GuestMemory,
906 ) -> Gpu {
907 let virglrenderer_flags = VirglRendererFlags::new()
908 .use_egl(gpu_parameters.renderer_use_egl)
909 .use_gles(gpu_parameters.renderer_use_gles)
910 .use_glx(gpu_parameters.renderer_use_glx)
911 .use_surfaceless(gpu_parameters.renderer_use_surfaceless)
912 .use_external_blob(external_blob)
913 .use_venus(gpu_parameters.use_vulkan);
914 let gfxstream_flags = GfxstreamFlags::new()
915 .use_egl(gpu_parameters.renderer_use_egl)
916 .use_gles(gpu_parameters.renderer_use_gles)
917 .use_glx(gpu_parameters.renderer_use_glx)
918 .use_surfaceless(gpu_parameters.renderer_use_surfaceless)
919 .use_guest_angle(gpu_parameters.gfxstream_use_guest_angle)
920 .use_syncfd(gpu_parameters.gfxstream_use_syncfd)
921 .use_vulkan(gpu_parameters.use_vulkan);
922
923 let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
924 for (channel_name, path) in &channels {
925 match &channel_name[..] {
926 "" => rutabaga_channels.push(RutabagaChannel {
927 base_channel: path.clone(),
928 channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
929 }),
930 "mojo" => rutabaga_channels.push(RutabagaChannel {
931 base_channel: path.clone(),
932 channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
933 }),
934 _ => error!("unknown rutabaga channel"),
935 }
936 }
937
938 let rutabaga_channels_opt = Some(rutabaga_channels);
939 let component = match gpu_parameters.mode {
940 GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
941 GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
942 GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
943 };
944
945 let rutabaga_builder = RutabagaBuilder::new(component)
946 .set_display_width(gpu_parameters.display_width)
947 .set_display_height(gpu_parameters.display_height)
948 .set_virglrenderer_flags(virglrenderer_flags)
949 .set_gfxstream_flags(gfxstream_flags)
950 .set_rutabaga_channels(rutabaga_channels_opt);
951
952 Gpu {
953 exit_evt,
954 gpu_device_tube,
955 num_scanouts,
956 resource_bridges,
957 event_devices,
958 config_event: false,
959 kill_evt: None,
960 worker_thread: None,
961 display_backends,
962 display_width: gpu_parameters.display_width,
963 display_height: gpu_parameters.display_height,
964 rutabaga_builder: Some(rutabaga_builder),
965 pci_bar: None,
966 map_request,
967 external_blob,
968 rutabaga_component: component,
969 base_features,
970 mem,
971 udmabuf: gpu_parameters.udmabuf,
972 }
973 }
974
    fn get_config(&self) -> virtio_gpu_config {
976 let mut events_read = 0;
977 if self.config_event {
978 events_read |= VIRTIO_GPU_EVENT_DISPLAY;
979 }
980
981 let num_capsets = match self.rutabaga_component {
982 RutabagaComponentType::Rutabaga2D => 0,
983 _ => {
984 let mut num_capsets = 0;
985
986 // Cross-domain (like virtio_wl with llvmpipe) is always available.
987 num_capsets += 1;
988
989 // Three capsets for virgl_renderer
990 #[cfg(feature = "virgl_renderer")]
991 {
992 num_capsets += 3;
993 }
994
995 // One capset for gfxstream
996 #[cfg(feature = "gfxstream")]
997 {
998 num_capsets += 1;
999 }
1000
1001 num_capsets
1002 }
1003 };
1004
1005 virtio_gpu_config {
1006 events_read: Le32::from(events_read),
1007 events_clear: Le32::from(0),
1008 num_scanouts: Le32::from(self.num_scanouts.get() as u32),
1009 num_capsets: Le32::from(num_capsets),
1010 }
1011 }
1012 }
1013
1014 impl Drop for Gpu {
    fn drop(&mut self) {
1016 if let Some(kill_evt) = self.kill_evt.take() {
1017 // Ignore the result because there is nothing we can do about it.
1018 let _ = kill_evt.write(1);
1019 }
1020
1021 if let Some(worker_thread) = self.worker_thread.take() {
1022 let _ = worker_thread.join();
1023 }
1024 }
1025 }
1026
1027 impl VirtioDevice for Gpu {
    fn keep_rds(&self) -> Vec<RawDescriptor> {
1029 let mut keep_rds = Vec::new();
1030 // TODO(davidriley): Remove once virgl has another path to include
1031 // debugging logs.
1032 if cfg!(debug_assertions) {
1033 keep_rds.push(libc::STDOUT_FILENO);
1034 keep_rds.push(libc::STDERR_FILENO);
1035 }
1036
1037 if self.udmabuf {
1038 keep_rds.append(&mut self.mem.as_raw_descriptors());
1039 }
1040
1041 if let Some(ref gpu_device_tube) = self.gpu_device_tube {
1042 keep_rds.push(gpu_device_tube.as_raw_descriptor());
1043 }
1044
1045 keep_rds.push(self.exit_evt.as_raw_descriptor());
1046 for bridge in &self.resource_bridges {
1047 keep_rds.push(bridge.as_raw_descriptor());
1048 }
1049
1050 keep_rds
1051 }
1052
    fn device_type(&self) -> u32 {
1054 TYPE_GPU
1055 }
1056
    fn queue_max_sizes(&self) -> &[u16] {
1058 QUEUE_SIZES
1059 }
1060
    fn features(&self) -> u64 {
1062 let rutabaga_features = match self.rutabaga_component {
1063 RutabagaComponentType::Rutabaga2D => 0,
1064 _ => {
1065 let mut features_3d = 0;
1066
1067 features_3d |= 1 << VIRTIO_GPU_F_VIRGL
1068 | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1069 | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
1070 | 1 << VIRTIO_GPU_F_CONTEXT_INIT
1071 | 1 << VIRTIO_GPU_F_RESOURCE_SYNC;
1072
1073 if self.udmabuf {
1074 features_3d |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1075 }
1076
1077 features_3d
1078 }
1079 };
1080
1081 self.base_features | rutabaga_features
1082 }
1083
    fn ack_features(&mut self, value: u64) {
1085 let _ = value;
1086 }
1087
    fn read_config(&self, offset: u64, data: &mut [u8]) {
1089 copy_config(data, 0, self.get_config().as_slice(), offset);
1090 }
1091
    fn write_config(&mut self, offset: u64, data: &[u8]) {
1093 let mut cfg = self.get_config();
1094 copy_config(cfg.as_mut_slice(), offset, data, 0);
1095 if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
1096 self.config_event = false;
1097 }
1098 }
1099
    fn activate(
1101 &mut self,
1102 mem: GuestMemory,
1103 interrupt: Interrupt,
1104 mut queues: Vec<Queue>,
1105 mut queue_evts: Vec<Event>,
1106 ) {
1107 if queues.len() != QUEUE_SIZES.len() || queue_evts.len() != QUEUE_SIZES.len() {
1108 return;
1109 }
1110
1111 let exit_evt = match self.exit_evt.try_clone() {
1112 Ok(e) => e,
1113 Err(e) => {
1114 error!("error cloning exit event: {}", e);
1115 return;
1116 }
1117 };
1118
1119 let (self_kill_evt, kill_evt) = match Event::new().and_then(|e| Ok((e.try_clone()?, e))) {
1120 Ok(v) => v,
1121 Err(e) => {
1122 error!("error creating kill Event pair: {}", e);
1123 return;
1124 }
1125 };
1126 self.kill_evt = Some(self_kill_evt);
1127
1128 let resource_bridges = mem::replace(&mut self.resource_bridges, Vec::new());
1129
1130 let ctrl_queue = queues.remove(0);
1131 let ctrl_evt = queue_evts.remove(0);
1132 let cursor_queue = queues.remove(0);
1133 let cursor_evt = queue_evts.remove(0);
1134 let display_backends = self.display_backends.clone();
1135 let display_width = self.display_width;
1136 let display_height = self.display_height;
1137 let event_devices = self.event_devices.split_off(0);
1138 let map_request = Arc::clone(&self.map_request);
1139 let external_blob = self.external_blob;
1140 let udmabuf = self.udmabuf;
1141 if let (Some(gpu_device_tube), Some(pci_bar), Some(rutabaga_builder)) = (
1142 self.gpu_device_tube.take(),
1143 self.pci_bar.take(),
1144 self.rutabaga_builder.take(),
1145 ) {
1146 let worker_result =
1147 thread::Builder::new()
1148 .name("virtio_gpu".to_string())
1149 .spawn(move || {
1150 let virtio_gpu = match build(
1151 &display_backends,
1152 display_width,
1153 display_height,
1154 rutabaga_builder,
1155 event_devices,
1156 gpu_device_tube,
1157 pci_bar,
1158 map_request,
1159 external_blob,
1160 udmabuf,
1161 ) {
1162 Some(backend) => backend,
1163 None => return,
1164 };
1165
1166 Worker {
1167 interrupt,
1168 exit_evt,
1169 mem,
1170 ctrl_queue,
1171 ctrl_evt,
1172 cursor_queue,
1173 cursor_evt,
1174 resource_bridges,
1175 kill_evt,
1176 state: Frontend::new(virtio_gpu),
1177 }
1178 .run()
1179 });
1180
1181 match worker_result {
1182 Err(e) => {
1183 error!("failed to spawn virtio_gpu worker: {}", e);
1184 return;
1185 }
1186 Ok(join_handle) => {
1187 self.worker_thread = Some(join_handle);
1188 }
1189 }
1190 }
1191 }
1192
1193 // Require 1 BAR for mapping 3D buffers
    fn get_device_bars(&mut self, address: PciAddress) -> Vec<PciBarConfiguration> {
1195 self.pci_bar = Some(Alloc::PciBar {
1196 bus: address.bus,
1197 dev: address.dev,
1198 func: address.func,
1199 bar: GPU_BAR_NUM,
1200 });
1201 vec![PciBarConfiguration::new(
1202 GPU_BAR_NUM as usize,
1203 GPU_BAR_SIZE,
1204 PciBarRegionType::Memory64BitRegion,
1205 PciBarPrefetchable::NotPrefetchable,
1206 )]
1207 }
1208
    fn get_device_caps(&self) -> Vec<Box<dyn PciCapability>> {
1210 vec![Box::new(VirtioPciShmCap::new(
1211 PciCapabilityType::SharedMemoryConfig,
1212 GPU_BAR_NUM,
1213 GPU_BAR_OFFSET,
1214 GPU_BAR_SIZE,
1215 VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
1216 ))]
1217 }
1218 }
1219
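// A minimal sketch of unit tests for the pure helpers in this module, assuming the
// RutabagaFenceData struct literals used above cover all of that struct's fields. The test
// names and values are illustrative additions, not part of the upstream file.
#[cfg(test)]
mod tests {
    use super::*;

    // Two fences on the global timeline (no FENCE_CTX_IDX flag) always share a timeline.
    #[test]
    fn fence_ctx_equal_global_timeline() {
        let desc = RutabagaFenceData {
            flags: VIRTIO_GPU_FLAG_FENCE,
            fence_id: 1,
            ctx_id: 0,
            fence_ctx_idx: 0,
        };
        let completed = RutabagaFenceData {
            flags: VIRTIO_GPU_FLAG_FENCE,
            fence_id: 2,
            ctx_id: 0,
            fence_ctx_idx: 0,
        };
        assert!(fence_ctx_equal(&desc, &completed));
    }

    // Fences in different fence contexts of the same 3D context do not match.
    #[test]
    fn fence_ctx_equal_different_fence_ctx() {
        let desc = RutabagaFenceData {
            flags: VIRTIO_GPU_FLAG_INFO_FENCE_CTX_IDX,
            fence_id: 1,
            ctx_id: 3,
            fence_ctx_idx: 0,
        };
        let completed = RutabagaFenceData {
            flags: VIRTIO_GPU_FLAG_INFO_FENCE_CTX_IDX,
            fence_id: 1,
            ctx_id: 3,
            fence_ctx_idx: 1,
        };
        assert!(!fence_ctx_equal(&desc, &completed));
    }

    // Default parameters advertise the default display size and the virglrenderer mode.
    #[test]
    fn gpu_parameters_defaults() {
        let params = GpuParameters::default();
        assert_eq!(params.display_width, DEFAULT_DISPLAY_WIDTH);
        assert_eq!(params.display_height, DEFAULT_DISPLAY_HEIGHT);
        assert_eq!(params.mode, GpuMode::ModeVirglRenderer);
    }
}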