// Copyright 2018 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

mod edid;
mod parameters;
mod protocol;
mod virtio_gpu;

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::io::Read;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::Ordering;
use std::sync::mpsc;
use std::sync::Arc;

use anyhow::anyhow;
use anyhow::Context;
use base::debug;
use base::error;
#[cfg(any(target_os = "android", target_os = "linux"))]
use base::linux::move_task_to_cgroup;
use base::warn;
use base::AsRawDescriptor;
use base::Event;
use base::EventToken;
use base::RawDescriptor;
use base::ReadNotifier;
#[cfg(windows)]
use base::RecvTube;
use base::Result;
use base::SafeDescriptor;
use base::SendTube;
use base::Tube;
use base::VmEventType;
use base::WaitContext;
use base::WorkerThread;
use data_model::*;
pub use gpu_display::EventDevice;
use gpu_display::*;
pub use parameters::GpuParameters;
use rutabaga_gfx::*;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
pub use vm_control::gpu::DisplayMode as GpuDisplayMode;
pub use vm_control::gpu::DisplayParameters as GpuDisplayParameters;
use vm_control::gpu::GpuControlCommand;
use vm_control::gpu::GpuControlResult;
pub use vm_control::gpu::MouseMode as GpuMouseMode;
pub use vm_control::gpu::DEFAULT_DISPLAY_HEIGHT;
pub use vm_control::gpu::DEFAULT_DISPLAY_WIDTH;
pub use vm_control::gpu::DEFAULT_REFRESH_RATE;
#[cfg(windows)]
use vm_control::ModifyWaitContext;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;
use zerocopy::AsBytes;

pub use self::protocol::virtio_gpu_config;
pub use self::protocol::VIRTIO_GPU_F_CONTEXT_INIT;
pub use self::protocol::VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
pub use self::protocol::VIRTIO_GPU_F_EDID;
pub use self::protocol::VIRTIO_GPU_F_FENCE_PASSING;
pub use self::protocol::VIRTIO_GPU_F_RESOURCE_BLOB;
pub use self::protocol::VIRTIO_GPU_F_RESOURCE_UUID;
pub use self::protocol::VIRTIO_GPU_F_VIRGL;
pub use self::protocol::VIRTIO_GPU_MAX_SCANOUTS;
pub use self::protocol::VIRTIO_GPU_SHM_ID_HOST_VISIBLE;
use self::protocol::*;
use self::virtio_gpu::to_rutabaga_descriptor;
pub use self::virtio_gpu::ProcessDisplayResult;
use self::virtio_gpu::VirtioGpu;
use self::virtio_gpu::VirtioGpuSnapshot;
use super::copy_config;
use super::resource_bridge::ResourceRequest;
use super::DescriptorChain;
use super::DeviceType;
use super::Interrupt;
use super::Queue;
use super::Reader;
use super::SharedMemoryMapper;
use super::SharedMemoryRegion;
use super::VirtioDevice;
use super::Writer;
use crate::PciAddress;

// First queue is for virtio gpu commands. Second queue is for cursor commands, which we expect
// there to be fewer of.
const QUEUE_SIZES: &[u16] = &[512, 16];

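/// Rendering backend for the virtio-gpu device. The serde renames and aliases below define the
/// strings accepted when this mode is parsed from the device configuration (e.g. "2d",
/// "3d"/"virglrenderer", "gfxstream").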
#[derive(Copy, Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
pub enum GpuMode {
    #[serde(rename = "2d", alias = "2D")]
    Mode2D,
    #[cfg(feature = "virgl_renderer")]
    #[serde(rename = "virglrenderer", alias = "3d", alias = "3D")]
    ModeVirglRenderer,
    #[cfg(feature = "gfxstream")]
    #[serde(rename = "gfxstream")]
    ModeGfxstream,
}

impl Default for GpuMode {
    fn default() -> Self {
        #[cfg(all(windows, feature = "gfxstream"))]
        return GpuMode::ModeGfxstream;

        #[cfg(all(unix, feature = "virgl_renderer"))]
        return GpuMode::ModeVirglRenderer;

        #[cfg(not(any(
            all(windows, feature = "gfxstream"),
            all(unix, feature = "virgl_renderer"),
        )))]
        return GpuMode::Mode2D;
    }
}

#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum GpuWsi {
    #[serde(alias = "vk")]
    Vulkan,
}

#[derive(Copy, Clone, Debug)]
pub struct VirtioScanoutBlobData {
    pub width: u32,
    pub height: u32,
    pub drm_format: DrmFormat,
    pub strides: [u32; 4],
    pub offsets: [u32; 4],
}

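/// Identifies the fence timeline a fence belongs to: either the single global timeline or a
/// per-context ring, as selected by `VIRTIO_GPU_FLAG_INFO_RING_IDX` in the fence flags (see
/// `create_fence_handler` and `process_descriptor` below).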
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
enum VirtioGpuRing {
    Global,
    ContextSpecific { ctx_id: u32, ring_idx: u8 },
}

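/// A descriptor chain that is held back until the fence identified by (`ring`, `fence_id`)
/// completes; `len` is the number of response bytes already written into the chain.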
struct FenceDescriptor {
    ring: VirtioGpuRing,
    fence_id: u64,
    desc_chain: DescriptorChain,
    len: u32,
}

#[derive(Default)]
pub struct FenceState {
    descs: Vec<FenceDescriptor>,
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}

#[derive(Serialize, Deserialize)]
struct FenceStateSnapshot {
    completed_fences: BTreeMap<VirtioGpuRing, u64>,
}

impl FenceState {
    fn snapshot(&self) -> FenceStateSnapshot {
        assert!(self.descs.is_empty(), "can't snapshot with pending fences");
        FenceStateSnapshot {
            completed_fences: self.completed_fences.clone(),
        }
    }

    fn restore(&mut self, snapshot: FenceStateSnapshot) {
        assert!(self.descs.is_empty(), "can't restore activated device");
        self.completed_fences = snapshot.completed_fences;
    }
}

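/// Minimal interface for draining a virtqueue and returning used descriptor chains. It is
/// implemented by `LocalQueueReader` for the single-threaded cursor queue and by
/// `SharedQueueReader` for the control queue, which the rutabaga fence handler also writes to.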
pub trait QueueReader {
    fn pop(&self) -> Option<DescriptorChain>;
    fn add_used(&self, desc_chain: DescriptorChain, len: u32);
    fn signal_used(&self);
}

struct LocalQueueReader {
    queue: RefCell<Queue>,
    interrupt: Interrupt,
}

impl LocalQueueReader {
    fn new(queue: Queue, interrupt: Interrupt) -> Self {
        Self {
            queue: RefCell::new(queue),
            interrupt,
        }
    }
}

impl QueueReader for LocalQueueReader {
    fn pop(&self) -> Option<DescriptorChain> {
        self.queue.borrow_mut().pop()
    }

    fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
        self.queue.borrow_mut().add_used(desc_chain, len)
    }

    fn signal_used(&self) {
        self.queue.borrow_mut().trigger_interrupt(&self.interrupt);
    }
}

#[derive(Clone)]
struct SharedQueueReader {
    queue: Arc<Mutex<Queue>>,
    interrupt: Interrupt,
}

impl SharedQueueReader {
    fn new(queue: Queue, interrupt: Interrupt) -> Self {
        Self {
            queue: Arc::new(Mutex::new(queue)),
            interrupt,
        }
    }
}

impl QueueReader for SharedQueueReader {
    fn pop(&self) -> Option<DescriptorChain> {
        self.queue.lock().pop()
    }

    fn add_used(&self, desc_chain: DescriptorChain, len: u32) {
        self.queue.lock().add_used(desc_chain, len)
    }

    fn signal_used(&self) {
        self.queue.lock().trigger_interrupt(&self.interrupt);
    }
}

/// Initializes the virtio_gpu state tracker.
fn build(
    display_backends: &[DisplayBackend],
    display_params: Vec<GpuDisplayParameters>,
    display_event: Arc<AtomicBool>,
    rutabaga: Rutabaga,
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    external_blob: bool,
    fixed_blob_mapping: bool,
    #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
    udmabuf: bool,
    #[cfg(windows)] gpu_display_wait_descriptor_ctrl_wr: SendTube,
) -> Option<VirtioGpu> {
    let mut display_opt = None;
    for display_backend in display_backends {
        match display_backend.build(
            #[cfg(windows)]
            wndproc_thread,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr
                .try_clone()
                .expect("failed to clone wait context ctrl channel"),
        ) {
            Ok(c) => {
                display_opt = Some(c);
                break;
            }
            Err(e) => error!("failed to open display: {}", e),
        };
    }

    let display = match display_opt {
        Some(d) => d,
        None => {
            error!("failed to open any displays");
            return None;
        }
    };

    VirtioGpu::new(
        display,
        display_params,
        display_event,
        rutabaga,
        mapper,
        external_blob,
        fixed_blob_mapping,
        udmabuf,
    )
}

/// Resources used by the fence handler.
pub struct FenceHandlerActivationResources<Q>
where
    Q: QueueReader + Send + Clone + 'static,
{
    pub mem: GuestMemory,
    pub ctrl_queue: Q,
}

/// Creates a handler that writes into the completed fence queue.
pub fn create_fence_handler<Q>(
    fence_handler_resources: Arc<Mutex<Option<FenceHandlerActivationResources<Q>>>>,
    fence_state: Arc<Mutex<FenceState>>,
) -> RutabagaFenceHandler
where
    Q: QueueReader + Send + Clone + 'static,
{
    RutabagaFenceHandler::new(move |completed_fence: RutabagaFence| {
        let mut signal = false;

        if let Some(ref fence_handler_resources) = *fence_handler_resources.lock() {
            // Limits the lifetime of `fence_state`:
            {
                let ring = match completed_fence.flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific {
                        ctx_id: completed_fence.ctx_id,
                        ring_idx: completed_fence.ring_idx,
                    },
                };

                let mut fence_state = fence_state.lock();
                // TODO(dverkamp): use `drain_filter()` when it is stabilized
                let mut i = 0;
                while i < fence_state.descs.len() {
                    if fence_state.descs[i].ring == ring
                        && fence_state.descs[i].fence_id <= completed_fence.fence_id
                    {
                        let completed_desc = fence_state.descs.remove(i);
                        fence_handler_resources
                            .ctrl_queue
                            .add_used(completed_desc.desc_chain, completed_desc.len);
                        signal = true;
                    } else {
                        i += 1;
                    }
                }

                // Update the last completed fence for this context
                fence_state
                    .completed_fences
                    .insert(ring, completed_fence.fence_id);
            }

            if signal {
                fence_handler_resources.ctrl_queue.signal_used();
            }
        }
    })
}
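
// A minimal sketch of how the handler above is wired up (mirroring the two-phase setup in
// `start_worker_thread` below; `resources`, `handler`, and `ctrl_queue` are illustrative local
// names, not part of this module's API): the handler must exist before rutabaga is built, but
// guest memory and the control queue only become available at activation time, so the shared
// resources slot starts out empty and is populated later.
//
//     let resources = Arc::new(Mutex::new(None));
//     let handler = create_fence_handler(resources.clone(), fence_state.clone());
//     let rutabaga = rutabaga_builder.build(handler, None)?;
//     // ... later, once the device is activated:
//     resources.lock().replace(FenceHandlerActivationResources {
//         mem: mem.clone(),
//         ctrl_queue: ctrl_queue.clone(),
//     });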

pub struct ReturnDescriptor {
    pub desc_chain: DescriptorChain,
    pub len: u32,
}

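/// Executes decoded virtio-gpu commands against the `VirtioGpu` state tracker and tracks the
/// fences that gate completion of in-flight descriptor chains.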
pub struct Frontend {
    fence_state: Arc<Mutex<FenceState>>,
    virtio_gpu: VirtioGpu,
}

impl Frontend {
    fn new(virtio_gpu: VirtioGpu, fence_state: Arc<Mutex<FenceState>>) -> Frontend {
        Frontend {
            fence_state,
            virtio_gpu,
        }
    }

    /// Returns the internal connection to the compositor and its associated state.
    pub fn display(&mut self) -> &Rc<RefCell<GpuDisplay>> {
        self.virtio_gpu.display()
    }

    /// Processes the internal `display` events and returns a `ProcessDisplayResult` indicating,
    /// for example, whether a display close was requested.
    pub fn process_display(&mut self) -> ProcessDisplayResult {
        self.virtio_gpu.process_display()
    }

    /// Processes incoming requests on `resource_bridge`.
    pub fn process_resource_bridge(&mut self, resource_bridge: &Tube) -> anyhow::Result<()> {
        let response = match resource_bridge.recv() {
            Ok(ResourceRequest::GetBuffer { id }) => self.virtio_gpu.export_resource(id),
            Ok(ResourceRequest::GetFence { seqno }) => self.virtio_gpu.export_fence(seqno),
            Err(e) => return Err(e).context("Error receiving resource bridge request"),
        };

        resource_bridge
            .send(&response)
            .context("Error sending resource bridge response")?;

        Ok(())
    }

    /// Processes a GPU control command and returns the result. A
    /// `GpuControlResult::DisplaysUpdated` result indicates that the GPU device's config needs to
    /// be updated.
    pub fn process_gpu_control_command(&mut self, cmd: GpuControlCommand) -> GpuControlResult {
        self.virtio_gpu.process_gpu_control_command(cmd)
    }

    fn process_gpu_command(
        &mut self,
        mem: &GuestMemory,
        cmd: GpuCommand,
        reader: &mut Reader,
    ) -> VirtioGpuResult {
        self.virtio_gpu.force_ctx_0();

        match cmd {
            GpuCommand::GetDisplayInfo(_) => Ok(GpuResponse::OkDisplayInfo(
                self.virtio_gpu.display_info().to_vec(),
            )),
            GpuCommand::ResourceCreate2d(info) => {
                let resource_id = info.resource_id.to_native();

                let resource_create_3d = ResourceCreate3D {
                    target: RUTABAGA_PIPE_TEXTURE_2D,
                    format: info.format.to_native(),
                    bind: RUTABAGA_PIPE_BIND_RENDER_TARGET,
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: 1,
                    array_size: 1,
                    last_level: 0,
                    nr_samples: 0,
                    flags: 0,
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::ResourceUnref(info) => {
                self.virtio_gpu.unref_resource(info.resource_id.to_native())
            }
            GpuCommand::SetScanout(info) => self.virtio_gpu.set_scanout(
                info.scanout_id.to_native(),
                info.resource_id.to_native(),
                None,
            ),
            GpuCommand::ResourceFlush(info) => {
                self.virtio_gpu.flush_resource(info.resource_id.to_native())
            }
            GpuCommand::TransferToHost2d(info) => {
                let resource_id = info.resource_id.to_native();
                let transfer = Transfer3D::new_2d(
                    info.r.x.to_native(),
                    info.r.y.to_native(),
                    info.r.width.to_native(),
                    info.r.height.to_native(),
                );
                self.virtio_gpu.transfer_write(0, resource_id, transfer)
            }
            GpuCommand::ResourceAttachBacking(info) => {
                let available_bytes = reader.available_bytes();
                if available_bytes != 0 {
                    let entry_count = info.nr_entries.to_native() as usize;
                    let mut vecs = Vec::with_capacity(entry_count);
                    for _ in 0..entry_count {
                        match reader.read_obj::<virtio_gpu_mem_entry>() {
                            Ok(entry) => {
                                let addr = GuestAddress(entry.addr.to_native());
                                let len = entry.length.to_native() as usize;
                                vecs.push((addr, len))
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }
                    self.virtio_gpu
                        .attach_backing(info.resource_id.to_native(), mem, vecs)
                } else {
                    error!("missing data for command {:?}", cmd);
                    Err(GpuResponse::ErrUnspec)
                }
            }
            GpuCommand::ResourceDetachBacking(info) => {
                self.virtio_gpu.detach_backing(info.resource_id.to_native())
            }
            GpuCommand::UpdateCursor(info) => self.virtio_gpu.update_cursor(
                info.resource_id.to_native(),
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::MoveCursor(info) => self.virtio_gpu.move_cursor(
                info.pos.scanout_id.to_native(),
                info.pos.x.into(),
                info.pos.y.into(),
            ),
            GpuCommand::ResourceAssignUuid(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_assign_uuid(resource_id)
            }
            GpuCommand::GetCapsetInfo(info) => self
                .virtio_gpu
                .get_capset_info(info.capset_index.to_native()),
            GpuCommand::GetCapset(info) => self
                .virtio_gpu
                .get_capset(info.capset_id.to_native(), info.capset_version.to_native()),
            GpuCommand::CtxCreate(info) => {
                let context_name: Option<String> =
                    String::from_utf8(info.debug_name.to_vec()).ok();
                self.virtio_gpu.create_context(
                    info.hdr.ctx_id.to_native(),
                    info.context_init.to_native(),
                    context_name.as_deref(),
                )
            }
            GpuCommand::CtxDestroy(info) => {
                self.virtio_gpu.destroy_context(info.hdr.ctx_id.to_native())
            }
            GpuCommand::CtxAttachResource(info) => self
                .virtio_gpu
                .context_attach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::CtxDetachResource(info) => self
                .virtio_gpu
                .context_detach_resource(info.hdr.ctx_id.to_native(), info.resource_id.to_native()),
            GpuCommand::ResourceCreate3d(info) => {
                let resource_id = info.resource_id.to_native();
                let resource_create_3d = ResourceCreate3D {
                    target: info.target.to_native(),
                    format: info.format.to_native(),
                    bind: info.bind.to_native(),
                    width: info.width.to_native(),
                    height: info.height.to_native(),
                    depth: info.depth.to_native(),
                    array_size: info.array_size.to_native(),
                    last_level: info.last_level.to_native(),
                    nr_samples: info.nr_samples.to_native(),
                    flags: info.flags.to_native(),
                };

                self.virtio_gpu
                    .resource_create_3d(resource_id, resource_create_3d)
            }
            GpuCommand::TransferToHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_write(ctx_id, resource_id, transfer)
            }
            GpuCommand::TransferFromHost3d(info) => {
                let ctx_id = info.hdr.ctx_id.to_native();
                let resource_id = info.resource_id.to_native();

                let transfer = Transfer3D {
                    x: info.box_.x.to_native(),
                    y: info.box_.y.to_native(),
                    z: info.box_.z.to_native(),
                    w: info.box_.w.to_native(),
                    h: info.box_.h.to_native(),
                    d: info.box_.d.to_native(),
                    level: info.level.to_native(),
                    stride: info.stride.to_native(),
                    layer_stride: info.layer_stride.to_native(),
                    offset: info.offset.to_native(),
                };

                self.virtio_gpu
                    .transfer_read(ctx_id, resource_id, transfer, None)
            }
            GpuCommand::CmdSubmit3d(info) => {
                if reader.available_bytes() != 0 {
                    let num_in_fences = info.num_in_fences.to_native() as usize;
                    let cmd_size = info.size.to_native() as usize;
                    let mut cmd_buf = vec![0; cmd_size];
                    let mut fence_ids: Vec<u64> = Vec::with_capacity(num_in_fences);
                    let ctx_id = info.hdr.ctx_id.to_native();

                    for _ in 0..num_in_fences {
                        match reader.read_obj::<Le64>() {
                            Ok(fence_id) => {
                                fence_ids.push(fence_id.to_native());
                            }
                            Err(_) => return Err(GpuResponse::ErrUnspec),
                        }
                    }

                    if reader.read_exact(&mut cmd_buf[..]).is_ok() {
                        self.virtio_gpu
                            .submit_command(ctx_id, &mut cmd_buf[..], &fence_ids[..])
                    } else {
                        Err(GpuResponse::ErrInvalidParameter)
                    }
                } else {
                    // Silently accept empty command buffers to allow for
                    // benchmarking.
                    Ok(GpuResponse::OkNoData)
                }
            }
            GpuCommand::ResourceCreateBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let ctx_id = info.hdr.ctx_id.to_native();

                let resource_create_blob = ResourceCreateBlob {
                    blob_mem: info.blob_mem.to_native(),
                    blob_flags: info.blob_flags.to_native(),
                    blob_id: info.blob_id.to_native(),
                    size: info.size.to_native(),
                };

                let entry_count = info.nr_entries.to_native();
                if reader.available_bytes() == 0 && entry_count > 0 {
                    return Err(GpuResponse::ErrUnspec);
                }

                let mut vecs = Vec::with_capacity(entry_count as usize);
                for _ in 0..entry_count {
                    match reader.read_obj::<virtio_gpu_mem_entry>() {
                        Ok(entry) => {
                            let addr = GuestAddress(entry.addr.to_native());
                            let len = entry.length.to_native() as usize;
                            vecs.push((addr, len))
                        }
                        Err(_) => return Err(GpuResponse::ErrUnspec),
                    }
                }

                self.virtio_gpu.resource_create_blob(
                    ctx_id,
                    resource_id,
                    resource_create_blob,
                    vecs,
                    mem,
                )
            }
            GpuCommand::SetScanoutBlob(info) => {
                let scanout_id = info.scanout_id.to_native();
                let resource_id = info.resource_id.to_native();
                let virtio_gpu_format = info.format.to_native();
                let width = info.width.to_native();
                let height = info.height.to_native();
                let mut strides: [u32; 4] = [0; 4];
                let mut offsets: [u32; 4] = [0; 4];

                // As of v4.19, virtio-gpu kms only really uses these formats. If that changes,
                // the following may have to change too.
                let drm_format = match virtio_gpu_format {
                    VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM => DrmFormat::new(b'X', b'R', b'2', b'4'),
                    VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM => DrmFormat::new(b'A', b'R', b'2', b'4'),
                    _ => {
                        error!("unrecognized virtio-gpu format {}", virtio_gpu_format);
                        return Err(GpuResponse::ErrUnspec);
                    }
                };

                for plane_index in 0..PLANE_INFO_MAX_COUNT {
                    offsets[plane_index] = info.offsets[plane_index].to_native();
                    strides[plane_index] = info.strides[plane_index].to_native();
                }

                let scanout = VirtioScanoutBlobData {
                    width,
                    height,
                    drm_format,
                    strides,
                    offsets,
                };

                self.virtio_gpu
                    .set_scanout(scanout_id, resource_id, Some(scanout))
            }
            GpuCommand::ResourceMapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                let offset = info.offset.to_native();
                self.virtio_gpu.resource_map_blob(resource_id, offset)
            }
            GpuCommand::ResourceUnmapBlob(info) => {
                let resource_id = info.resource_id.to_native();
                self.virtio_gpu.resource_unmap_blob(resource_id)
            }
            GpuCommand::GetEdid(info) => self.virtio_gpu.get_edid(info.scanout.to_native()),
        }
    }

    /// Processes virtio messages on `queue`.
    pub fn process_queue(&mut self, mem: &GuestMemory, queue: &dyn QueueReader) -> bool {
        let mut signal_used = false;
        while let Some(desc) = queue.pop() {
            if let Some(ret_desc) = self.process_descriptor(mem, desc) {
                queue.add_used(ret_desc.desc_chain, ret_desc.len);
                signal_used = true;
            }
        }

        signal_used
    }

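    /// Decodes and executes a single command from `desc_chain` and encodes the response into the
    /// chain's writable area. Returns `None` if the response must wait for a fence to complete;
    /// in that case the chain is parked in `fence_state` and returned by the fence handler later.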
    fn process_descriptor(
        &mut self,
        mem: &GuestMemory,
        mut desc_chain: DescriptorChain,
    ) -> Option<ReturnDescriptor> {
        let reader = &mut desc_chain.reader;
        let writer = &mut desc_chain.writer;
        let mut resp = Err(GpuResponse::ErrUnspec);
        let mut gpu_cmd = None;
        let mut len = 0;
        match GpuCommand::decode(reader) {
            Ok(cmd) => {
                resp = self.process_gpu_command(mem, cmd, reader);
                gpu_cmd = Some(cmd);
            }
            Err(e) => debug!("descriptor decode error: {}", e),
        }

        let mut gpu_response = match resp {
            Ok(gpu_response) => gpu_response,
            Err(gpu_response) => {
                if let Some(gpu_cmd) = gpu_cmd {
                    error!(
                        "error processing gpu command {:?}: {:?}",
                        gpu_cmd, gpu_response
                    );
                }
                gpu_response
            }
        };

        if writer.available_bytes() != 0 {
            let mut fence_id = 0;
            let mut ctx_id = 0;
            let mut flags = 0;
            let mut ring_idx = 0;
            if let Some(cmd) = gpu_cmd {
                let ctrl_hdr = cmd.ctrl_hdr();
                if ctrl_hdr.flags.to_native() & VIRTIO_GPU_FLAG_FENCE != 0 {
                    flags = ctrl_hdr.flags.to_native();
                    fence_id = ctrl_hdr.fence_id.to_native();
                    ctx_id = ctrl_hdr.ctx_id.to_native();
                    ring_idx = ctrl_hdr.ring_idx;

                    let fence = RutabagaFence {
                        flags,
                        fence_id,
                        ctx_id,
                        ring_idx,
                    };
                    gpu_response = match self.virtio_gpu.create_fence(fence) {
                        Ok(_) => gpu_response,
                        Err(fence_resp) => {
                            warn!("create_fence {} -> {:?}", fence_id, fence_resp);
                            fence_resp
                        }
                    };
                }
            }

            // Prepare the response now, even if it is going to wait until the
            // fence is complete.
            match gpu_response.encode(flags, fence_id, ctx_id, ring_idx, writer) {
                Ok(l) => len = l,
                Err(e) => debug!("ctrl queue response encode error: {}", e),
            }

            if flags & VIRTIO_GPU_FLAG_FENCE != 0 {
                let ring = match flags & VIRTIO_GPU_FLAG_INFO_RING_IDX {
                    0 => VirtioGpuRing::Global,
                    _ => VirtioGpuRing::ContextSpecific { ctx_id, ring_idx },
                };

                // In case the fence is signaled immediately after creation, don't add a return
                // FenceDescriptor.
                let mut fence_state = self.fence_state.lock();
                if fence_id > *fence_state.completed_fences.get(&ring).unwrap_or(&0) {
                    fence_state.descs.push(FenceDescriptor {
                        ring,
                        fence_id,
                        desc_chain,
                        len,
                    });

                    return None;
                }
            }

            // No fence (or already completed fence), respond now.
        }
        Some(ReturnDescriptor { desc_chain, len })
    }

    pub fn event_poll(&self) {
        self.virtio_gpu.event_poll();
    }
}

#[derive(EventToken, PartialEq, Eq, Clone, Copy, Debug)]
enum WorkerToken {
    CtrlQueue,
    CursorQueue,
    Display,
    GpuControl,
    InterruptResample,
    Kill,
    ResourceBridge {
        index: usize,
    },
    VirtioGpuPoll,
    #[cfg(windows)]
    DisplayDescriptorRequest,
}

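/// Wraps a `WaitContext` together with the list of registered triggers so that every descriptor
/// registered under a given token can later be removed by token (see `delete`, used to drop
/// hung-up triggers).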
struct EventManager<'a> {
    pub wait_ctx: WaitContext<WorkerToken>,
    events: Vec<(&'a dyn AsRawDescriptor, WorkerToken)>,
}

impl<'a> EventManager<'a> {
    pub fn new() -> Result<EventManager<'a>> {
        Ok(EventManager {
            wait_ctx: WaitContext::new()?,
            events: vec![],
        })
    }

    pub fn build_with(
        triggers: &[(&'a dyn AsRawDescriptor, WorkerToken)],
    ) -> Result<EventManager<'a>> {
        let mut manager = EventManager::new()?;
        manager.wait_ctx.add_many(triggers)?;

        for (descriptor, token) in triggers {
            manager.events.push((*descriptor, *token));
        }
        Ok(manager)
    }

    pub fn add(&mut self, descriptor: &'a dyn AsRawDescriptor, token: WorkerToken) -> Result<()> {
        self.wait_ctx.add(descriptor, token)?;
        self.events.push((descriptor, token));
        Ok(())
    }

    pub fn delete(&mut self, token: WorkerToken) {
        self.events.retain(|event| {
            if event.1 == token {
                self.wait_ctx.delete(event.0).ok();
                return false;
            }
            true
        });
    }
}

struct Worker {
    interrupt: Interrupt,
    exit_evt_wrtube: SendTube,
    gpu_control_tube: Tube,
    mem: GuestMemory,
    ctrl_queue: SharedQueueReader,
    cursor_queue: LocalQueueReader,
    resource_bridges: ResourceBridges,
    kill_evt: Event,
    state: Frontend,
    #[cfg(windows)]
    gpu_display_wait_descriptor_ctrl_rd: RecvTube,
}

struct WorkerReturn {
    gpu_control_tube: Tube,
    resource_bridges: ResourceBridges,
    event_devices: Vec<EventDevice>,
    // None if device not yet activated.
    activated_state: Option<(Vec<Queue>, WorkerSnapshot)>,
}

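/// Worker state preserved across a virtio_sleep/virtio_wake cycle (see the notes on
/// snapshot/restore functionality near `virtio_sleep` below).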
#[derive(Serialize, Deserialize)]
struct WorkerSnapshot {
    fence_state_snapshot: FenceStateSnapshot,
    virtio_gpu_snapshot: VirtioGpuSnapshot,
}

impl Worker {
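    /// Runs the worker's event loop until the kill event fires or an unrecoverable error occurs.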
    fn run(&mut self) {
        let display_desc =
            match SafeDescriptor::try_from(&*self.state.display().borrow() as &dyn AsRawDescriptor)
            {
                Ok(v) => v,
                Err(e) => {
                    error!("failed getting event descriptor for display: {}", e);
                    return;
                }
            };

        let ctrl_evt = self
            .ctrl_queue
            .queue
            .lock()
            .event()
            .try_clone()
            .expect("failed to clone queue event");
        let cursor_evt = self
            .cursor_queue
            .queue
            .borrow()
            .event()
            .try_clone()
            .expect("failed to clone queue event");

        let mut event_manager = match EventManager::build_with(&[
            (&ctrl_evt, WorkerToken::CtrlQueue),
            (&cursor_evt, WorkerToken::CursorQueue),
            (&display_desc, WorkerToken::Display),
            (
                self.gpu_control_tube.get_read_notifier(),
                WorkerToken::GpuControl,
            ),
            (&self.kill_evt, WorkerToken::Kill),
            #[cfg(windows)]
            (
                self.gpu_display_wait_descriptor_ctrl_rd.get_read_notifier(),
                WorkerToken::DisplayDescriptorRequest,
            ),
        ]) {
            Ok(v) => v,
            Err(e) => {
                error!("failed creating WaitContext: {}", e);
                return;
            }
        };

        if let Some(resample_evt) = self.interrupt.get_resample_evt() {
            if let Err(e) = event_manager.add(resample_evt, WorkerToken::InterruptResample) {
                error!(
                    "failed adding interrupt resample event to WaitContext: {}",
                    e
                );
                return;
            }
        }

        let poll_desc: SafeDescriptor;
        if let Some(desc) = self.state.virtio_gpu.poll_descriptor() {
            poll_desc = desc;
            if let Err(e) = event_manager.add(&poll_desc, WorkerToken::VirtioGpuPoll) {
                error!("failed adding poll event to WaitContext: {}", e);
                return;
            }
        }

        self.resource_bridges
            .add_to_wait_context(&mut event_manager.wait_ctx);

        // TODO(davidriley): The entire main loop processing is somewhat racy and incorrect with
        // respect to cursor vs control queue processing. As both currently and originally
        // written, while the control queue is only processed/read from after the cursor queue
        // is finished, the entire queue will be processed at that time. The end effect of this
        // raciness is that control queue descriptors that are issued after cursor descriptors
        // might be handled first instead of the other way around. In practice, the cursor queue
        // isn't used so this isn't a huge issue.

        'wait: loop {
            let events = match event_manager.wait_ctx.wait() {
                Ok(v) => v,
                Err(e) => {
                    error!("failed polling for events: {}", e);
                    break;
                }
            };
            let mut signal_used_cursor = false;
            let mut signal_used_ctrl = false;
            let mut ctrl_available = false;
            let mut display_available = false;
            let mut needs_config_interrupt = false;

            // Remove event triggers that have been hung-up to prevent unnecessary worker wake-ups
            // (see b/244486346#comment62 for context).
            for event in events.iter().filter(|e| e.is_hungup) {
                error!(
                    "unhandled virtio-gpu worker event hang-up detected: {:?}",
                    event.token
                );
                event_manager.delete(event.token);
            }

            for event in events.iter().filter(|e| e.is_readable) {
                match event.token {
                    WorkerToken::CtrlQueue => {
                        let _ = ctrl_evt.wait();
                        // Set flag that control queue is available to be read, but defer reading
                        // until rest of the events are processed.
                        ctrl_available = true;
                    }
                    WorkerToken::CursorQueue => {
                        let _ = cursor_evt.wait();
                        if self.state.process_queue(&self.mem, &self.cursor_queue) {
                            signal_used_cursor = true;
                        }
                    }
                    WorkerToken::Display => {
                        // We only need to process_display once-per-wake, regardless of how many
                        // WorkerToken::Display events are received.
                        display_available = true;
                    }
                    #[cfg(windows)]
                    WorkerToken::DisplayDescriptorRequest => {
                        if let Ok(req) = self
                            .gpu_display_wait_descriptor_ctrl_rd
                            .recv::<ModifyWaitContext>()
                        {
                            match req {
                                ModifyWaitContext::Add(desc) => {
                                    if let Err(e) =
                                        event_manager.wait_ctx.add(&desc, WorkerToken::Display)
                                    {
                                        error!(
                                            "failed to add extra descriptor from display \
                                             to GPU worker wait context: {:?}",
                                            e
                                        )
                                    }
                                }
                            }
                        } else {
                            error!("failed to receive ModifyWaitContext request.")
                        }
                    }
                    WorkerToken::GpuControl => {
                        let req = match self.gpu_control_tube.recv() {
                            Ok(req) => req,
                            Err(e) => {
                                error!("gpu control socket failed recv: {:?}", e);
                                break 'wait;
                            }
                        };

                        let resp = self.state.process_gpu_control_command(req);

                        if let GpuControlResult::DisplaysUpdated = resp {
                            needs_config_interrupt = true;
                        }

                        if let Err(e) = self.gpu_control_tube.send(&resp) {
                            error!("display control socket failed send: {}", e);
                            break 'wait;
                        }
                    }
                    WorkerToken::ResourceBridge { index } => {
                        self.resource_bridges.set_should_process(index);
                    }
                    WorkerToken::InterruptResample => {
                        self.interrupt.interrupt_resample();
                    }
                    WorkerToken::VirtioGpuPoll => {
                        self.state.event_poll();
                    }
                    WorkerToken::Kill => {
                        break 'wait;
                    }
                }
            }

            if display_available {
                match self.state.process_display() {
                    ProcessDisplayResult::CloseRequested => {
                        let _ = self.exit_evt_wrtube.send::<VmEventType>(&VmEventType::Exit);
                    }
                    ProcessDisplayResult::Error(_e) => {
                        base::error!("Display processing failed, disabling display event handler.");
                        event_manager.delete(WorkerToken::Display);
                    }
                    ProcessDisplayResult::Success => (),
                };
            }

            if ctrl_available && self.state.process_queue(&self.mem, &self.ctrl_queue) {
                signal_used_ctrl = true;
            }

            // Process the entire control queue before the resource bridge in case a resource is
            // created or destroyed by the control queue. Processing the resource bridge first may
            // lead to a race condition.
            // TODO(davidriley): This is still inherently racy if both the control queue request
            // and the resource bridge request come in at the same time after the control queue is
            // processed above and before the corresponding bridge is processed below.
            self.resource_bridges
                .process_resource_bridges(&mut self.state, &mut event_manager.wait_ctx);

            if signal_used_ctrl {
                self.ctrl_queue.signal_used();
            }

            if signal_used_cursor {
                self.cursor_queue.signal_used();
            }

            if needs_config_interrupt {
                self.interrupt.signal_config_changed();
            }
        }
    }
}

/// Indicates a backend that should be tried for the gpu to use for display.
///
/// Several instances of this enum are used in an ordered list to give the gpu device many backends
/// to use as fallbacks in case some do not work.
#[derive(Clone)]
pub enum DisplayBackend {
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Use the wayland backend with the given socket path if given.
    Wayland(Option<PathBuf>),
    #[cfg(any(target_os = "android", target_os = "linux"))]
    /// Open a connection to the X server at the given display if given.
    X(Option<String>),
    /// Emulate a display without actually displaying it.
    Stub,
    #[cfg(windows)]
    /// Open a window using WinAPI.
    WinApi,
    #[cfg(feature = "android_display")]
    /// The display buffer is backed by an Android surface. The surface is set via an AIDL service
    /// that the backend hosts. Currently, the AIDL service is registered to the service manager
    /// using the name given here. The entity holding the surface is expected to locate the service
    /// via this name, and pass the surface to it.
    Android(String),
}

impl DisplayBackend {
    fn build(
        &self,
        #[cfg(windows)] wndproc_thread: &mut Option<WindowProcedureThread>,
        #[cfg(windows)] gpu_display_wait_descriptor_ctrl: SendTube,
    ) -> std::result::Result<GpuDisplay, GpuDisplayError> {
        match self {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::Wayland(path) => GpuDisplay::open_wayland(path.as_ref()),
            #[cfg(any(target_os = "android", target_os = "linux"))]
            DisplayBackend::X(display) => GpuDisplay::open_x(display.as_deref()),
            DisplayBackend::Stub => GpuDisplay::open_stub(),
            #[cfg(windows)]
            DisplayBackend::WinApi => match wndproc_thread.take() {
                Some(wndproc_thread) => GpuDisplay::open_winapi(
                    wndproc_thread,
                    /* win_metrics= */ None,
                    gpu_display_wait_descriptor_ctrl,
                    None,
                ),
                None => {
                    error!("wndproc_thread is none");
                    Err(GpuDisplayError::Allocate)
                }
            },
            #[cfg(feature = "android_display")]
            DisplayBackend::Android(service_name) => GpuDisplay::open_android(service_name),
        }
    }
}

/// Resources that are not available until the device is activated.
struct GpuActivationResources {
    mem: GuestMemory,
    interrupt: Interrupt,
    ctrl_queue: SharedQueueReader,
    cursor_queue: LocalQueueReader,
    worker_snapshot: Option<WorkerSnapshot>,
}

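/// The virtio-gpu device: owns the device configuration and the channel used to hand activation
/// resources to the GPU worker thread.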
pub struct Gpu {
    exit_evt_wrtube: SendTube,
    pub gpu_control_tube: Option<Tube>,
    mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    resource_bridges: Option<ResourceBridges>,
    event_devices: Option<Vec<EventDevice>>,
    // The worker thread + a channel used to activate it.
    // NOTE: The worker thread doesn't respond to `WorkerThread::stop` when in the pre-activate
    // phase. You must drop the channel first. That is also why the channel is first in the tuple
    // (tuple members are dropped in order).
    worker_thread: Option<(
        mpsc::Sender<GpuActivationResources>,
        WorkerThread<WorkerReturn>,
    )>,
    display_backends: Vec<DisplayBackend>,
    display_params: Vec<GpuDisplayParameters>,
    display_event: Arc<AtomicBool>,
    rutabaga_builder: RutabagaBuilder,
    pci_address: Option<PciAddress>,
    pci_bar_size: u64,
    external_blob: bool,
    fixed_blob_mapping: bool,
    rutabaga_component: RutabagaComponentType,
    #[cfg(windows)]
    wndproc_thread: Option<WindowProcedureThread>,
    base_features: u64,
    udmabuf: bool,
    rutabaga_server_descriptor: Option<SafeDescriptor>,
    #[cfg(windows)]
    /// Because the Windows GpuDisplay can't expose an epollfd, it has to inform the GPU worker
    /// which descriptors to add to its wait context. That's what this Tube is used for (it is
    /// provided to each display backend).
    gpu_display_wait_descriptor_ctrl_wr: SendTube,
    #[cfg(windows)]
    /// The GPU worker uses this Tube to receive the descriptors that should be added to its wait
    /// context.
    gpu_display_wait_descriptor_ctrl_rd: Option<RecvTube>,
    capset_mask: u64,
    #[cfg(any(target_os = "android", target_os = "linux"))]
    gpu_cgroup_path: Option<PathBuf>,
    /// Used to differentiate worker kill events that are for shutdown vs sleep. `virtio_sleep`
    /// sets this to true while stopping the worker.
    sleep_requested: Arc<AtomicBool>,
    worker_snapshot: Option<WorkerSnapshot>,
}

impl Gpu {
    pub fn new(
        exit_evt_wrtube: SendTube,
        gpu_control_tube: Tube,
        resource_bridges: Vec<Tube>,
        display_backends: Vec<DisplayBackend>,
        gpu_parameters: &GpuParameters,
        rutabaga_server_descriptor: Option<SafeDescriptor>,
        event_devices: Vec<EventDevice>,
        base_features: u64,
        channels: &BTreeMap<String, PathBuf>,
        #[cfg(windows)] wndproc_thread: WindowProcedureThread,
        #[cfg(any(target_os = "android", target_os = "linux"))] gpu_cgroup_path: Option<&PathBuf>,
    ) -> Gpu {
        let mut display_params = gpu_parameters.display_params.clone();
        if display_params.is_empty() {
            display_params.push(Default::default());
        }
        let (display_width, display_height) = display_params[0].get_virtual_display_size();

        let mut rutabaga_channels: Vec<RutabagaChannel> = Vec::new();
        for (channel_name, path) in channels {
            match &channel_name[..] {
                "" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_WAYLAND,
                }),
                "mojo" => rutabaga_channels.push(RutabagaChannel {
                    base_channel: path.clone(),
                    channel_type: RUTABAGA_CHANNEL_TYPE_CAMERA,
                }),
                _ => error!("unknown rutabaga channel"),
            }
        }

        let rutabaga_channels_opt = Some(rutabaga_channels);
        let component = match gpu_parameters.mode {
            GpuMode::Mode2D => RutabagaComponentType::Rutabaga2D,
            #[cfg(feature = "virgl_renderer")]
            GpuMode::ModeVirglRenderer => RutabagaComponentType::VirglRenderer,
            #[cfg(feature = "gfxstream")]
            GpuMode::ModeGfxstream => RutabagaComponentType::Gfxstream,
        };

        // Only allow virglrenderer to fork its own render server when explicitly requested.
        // Caller can enforce its own restrictions (e.g. not allowed when sandboxed) and set the
        // allow flag appropriately.
        let use_render_server = rutabaga_server_descriptor.is_some()
            || gpu_parameters.allow_implicit_render_server_exec;

        let rutabaga_wsi = match gpu_parameters.wsi {
            Some(GpuWsi::Vulkan) => RutabagaWsi::VulkanSwapchain,
            _ => RutabagaWsi::Surfaceless,
        };

        let rutabaga_builder = RutabagaBuilder::new(component, gpu_parameters.capset_mask)
            .set_display_width(display_width)
            .set_display_height(display_height)
            .set_rutabaga_channels(rutabaga_channels_opt)
            .set_use_egl(gpu_parameters.renderer_use_egl)
            .set_use_gles(gpu_parameters.renderer_use_gles)
            .set_use_glx(gpu_parameters.renderer_use_glx)
            .set_use_surfaceless(gpu_parameters.renderer_use_surfaceless)
            .set_use_vulkan(gpu_parameters.use_vulkan.unwrap_or_default())
            .set_wsi(rutabaga_wsi)
            .set_use_external_blob(gpu_parameters.external_blob)
            .set_use_system_blob(gpu_parameters.system_blob)
            .set_use_render_server(use_render_server)
            .set_renderer_features(gpu_parameters.renderer_features.clone());

        #[cfg(windows)]
        let (gpu_display_wait_descriptor_ctrl_wr, gpu_display_wait_descriptor_ctrl_rd) =
            Tube::directional_pair().expect("failed to create wait descriptor control pair.");

        Gpu {
            exit_evt_wrtube,
            gpu_control_tube: Some(gpu_control_tube),
            mapper: Arc::new(Mutex::new(None)),
            resource_bridges: Some(ResourceBridges::new(resource_bridges)),
            event_devices: Some(event_devices),
            worker_thread: None,
            display_backends,
            display_params,
            display_event: Arc::new(AtomicBool::new(false)),
            rutabaga_builder,
            pci_address: gpu_parameters.pci_address,
            pci_bar_size: gpu_parameters.pci_bar_size,
            external_blob: gpu_parameters.external_blob,
            fixed_blob_mapping: gpu_parameters.fixed_blob_mapping,
            rutabaga_component: component,
            #[cfg(windows)]
            wndproc_thread: Some(wndproc_thread),
            base_features,
            udmabuf: gpu_parameters.udmabuf,
            rutabaga_server_descriptor,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_wr,
            #[cfg(windows)]
            gpu_display_wait_descriptor_ctrl_rd: Some(gpu_display_wait_descriptor_ctrl_rd),
            capset_mask: gpu_parameters.capset_mask,
            #[cfg(any(target_os = "android", target_os = "linux"))]
            gpu_cgroup_path: gpu_cgroup_path.cloned(),
            sleep_requested: Arc::new(AtomicBool::new(false)),
            worker_snapshot: None,
        }
    }

    /// Initializes the internal device state so that it can begin processing virtqueues.
    ///
    /// Only used by vhost-user GPU.
    pub fn initialize_frontend(
        &mut self,
        fence_state: Arc<Mutex<FenceState>>,
        fence_handler: RutabagaFenceHandler,
        mapper: Arc<Mutex<Option<Box<dyn SharedMemoryMapper>>>>,
    ) -> Option<Frontend> {
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });
        let rutabaga = self
            .rutabaga_builder
            .clone()
            .build(fence_handler, rutabaga_server_descriptor)
            .map_err(|e| error!("failed to build rutabaga {}", e))
            .ok()?;

        let mut virtio_gpu = build(
            &self.display_backends,
            self.display_params.clone(),
            self.display_event.clone(),
            rutabaga,
            mapper,
            self.external_blob,
            self.fixed_blob_mapping,
            #[cfg(windows)]
            &mut self.wndproc_thread,
            self.udmabuf,
            #[cfg(windows)]
            self.gpu_display_wait_descriptor_ctrl_wr
                .try_clone()
                .expect("failed to clone wait context control channel"),
        )?;

        for event_device in self.event_devices.take().expect("missing event_devices") {
            virtio_gpu
                .import_event_device(event_device)
                // We lost the `EventDevice`, so fail hard.
                .expect("failed to import event device");
        }

        Some(Frontend::new(virtio_gpu, fence_state))
    }

    // This is not invoked when running with vhost-user GPU.
    fn start_worker_thread(&mut self) {
        let exit_evt_wrtube = self
            .exit_evt_wrtube
            .try_clone()
            .context("error cloning exit tube")
            .unwrap();

        let gpu_control_tube = self
            .gpu_control_tube
            .take()
            .context("gpu_control_tube is none")
            .unwrap();

        let resource_bridges = self
            .resource_bridges
            .take()
            .context("resource_bridges is none")
            .unwrap();

        let display_backends = self.display_backends.clone();
        let display_params = self.display_params.clone();
        let display_event = self.display_event.clone();
        let event_devices = self.event_devices.take().expect("missing event_devices");
        let external_blob = self.external_blob;
        let fixed_blob_mapping = self.fixed_blob_mapping;
        let udmabuf = self.udmabuf;
        let fence_state = Arc::new(Mutex::new(Default::default()));

        #[cfg(windows)]
        let mut wndproc_thread = self.wndproc_thread.take();

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_wr = self
            .gpu_display_wait_descriptor_ctrl_wr
            .try_clone()
            .expect("failed to clone wait context ctrl channel");

        #[cfg(windows)]
        let gpu_display_wait_descriptor_ctrl_rd = self
            .gpu_display_wait_descriptor_ctrl_rd
            .take()
            .expect("failed to take gpu_display_wait_descriptor_ctrl_rd");

        #[cfg(any(target_os = "android", target_os = "linux"))]
        let gpu_cgroup_path = self.gpu_cgroup_path.clone();

        let mapper = Arc::clone(&self.mapper);

        let rutabaga_builder = self.rutabaga_builder.clone();
        let rutabaga_server_descriptor = self.rutabaga_server_descriptor.as_ref().map(|d| {
            to_rutabaga_descriptor(d.try_clone().expect("failed to clone server descriptor"))
        });

        let (init_finished_tx, init_finished_rx) = mpsc::channel();
        let (activate_tx, activate_rx) = mpsc::channel();
        let sleep_requested = self.sleep_requested.clone();

        let worker_thread = WorkerThread::start("v_gpu", move |kill_evt| {
            #[cfg(any(target_os = "android", target_os = "linux"))]
            if let Some(cgroup_path) = gpu_cgroup_path {
                move_task_to_cgroup(cgroup_path, base::gettid())
                    .expect("Failed to move v_gpu into requested cgroup");
            }

            let rutabaga_fence_handler_resources = Arc::new(Mutex::new(None));
            let rutabaga_fence_handler = create_fence_handler(
                rutabaga_fence_handler_resources.clone(),
                fence_state.clone(),
            );
            let rutabaga =
                match rutabaga_builder.build(rutabaga_fence_handler, rutabaga_server_descriptor) {
                    Ok(rutabaga) => rutabaga,
                    Err(e) => {
                        error!("failed to build rutabaga {}", e);
                        return WorkerReturn {
                            gpu_control_tube,
                            resource_bridges,
                            event_devices,
                            activated_state: None,
                        };
                    }
                };

            let mut virtio_gpu = match build(
                &display_backends,
                display_params,
                display_event,
                rutabaga,
                mapper,
                external_blob,
                fixed_blob_mapping,
                #[cfg(windows)]
                &mut wndproc_thread,
                udmabuf,
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_wr,
            ) {
                Some(backend) => backend,
                None => {
                    return WorkerReturn {
                        gpu_control_tube,
                        resource_bridges,
                        event_devices,
                        activated_state: None,
                    };
                }
            };

            for event_device in event_devices {
                virtio_gpu
                    .import_event_device(event_device)
                    // We lost the `EventDevice`, so fail hard.
                    .expect("failed to import event device");
            }

            // Tell the parent thread that the init phase is complete.
            let _ = init_finished_tx.send(());

            let activation_resources: GpuActivationResources = match activate_rx.recv() {
                Ok(x) => x,
                // Other half of channel was dropped.
                Err(mpsc::RecvError) => {
                    return WorkerReturn {
                        gpu_control_tube,
                        resource_bridges,
                        event_devices: virtio_gpu.display().borrow_mut().take_event_devices(),
                        activated_state: None,
                    };
                }
            };

            rutabaga_fence_handler_resources
                .lock()
                .replace(FenceHandlerActivationResources {
                    mem: activation_resources.mem.clone(),
                    ctrl_queue: activation_resources.ctrl_queue.clone(),
                });
            // Drop so we don't hold extra refs on the queue's `Arc`.
            std::mem::drop(rutabaga_fence_handler_resources);

            let mut worker = Worker {
                interrupt: activation_resources.interrupt,
                exit_evt_wrtube,
                gpu_control_tube,
                mem: activation_resources.mem,
                ctrl_queue: activation_resources.ctrl_queue,
                cursor_queue: activation_resources.cursor_queue,
                resource_bridges,
                kill_evt,
                state: Frontend::new(virtio_gpu, fence_state),
                #[cfg(windows)]
                gpu_display_wait_descriptor_ctrl_rd,
            };

            // If a snapshot was provided, restore from it.
            if let Some(snapshot) = activation_resources.worker_snapshot {
                worker
                    .state
                    .fence_state
                    .lock()
                    .restore(snapshot.fence_state_snapshot);
                worker
                    .state
                    .virtio_gpu
                    .restore(snapshot.virtio_gpu_snapshot, &worker.mem)
                    .expect("failed to restore VirtioGpu");
            }

            worker.run();

            let event_devices = worker
                .state
                .virtio_gpu
                .display()
                .borrow_mut()
                .take_event_devices();
            // If we are stopping the worker because of a virtio_sleep request, then take a
            // snapshot and reclaim the queues.
            let activated_state = if sleep_requested.load(Ordering::SeqCst) {
                let worker_snapshot = WorkerSnapshot {
                    fence_state_snapshot: worker.state.fence_state.lock().snapshot(),
                    virtio_gpu_snapshot: worker
                        .state
                        .virtio_gpu
                        .snapshot()
                        .expect("failed to snapshot VirtioGpu"),
                };
                // Need to drop `Frontend` for the `Arc::try_unwrap` below to succeed.
                std::mem::drop(worker.state);
                Some((
                    vec![
                        match Arc::try_unwrap(worker.ctrl_queue.queue) {
                            Ok(x) => x.into_inner(),
                            Err(_) => panic!("too many refs on ctrl_queue"),
                        },
                        worker.cursor_queue.queue.into_inner(),
                    ],
                    worker_snapshot,
                ))
            } else {
                None
            };
            WorkerReturn {
                gpu_control_tube: worker.gpu_control_tube,
                resource_bridges: worker.resource_bridges,
                event_devices,
                activated_state,
            }
        });

        self.worker_thread = Some((activate_tx, worker_thread));

        match init_finished_rx.recv() {
            Ok(()) => {}
            Err(mpsc::RecvError) => error!("virtio-gpu worker thread init failed"),
        }
    }

    fn get_config(&self) -> virtio_gpu_config {
        let mut events_read = 0;

        if self.display_event.load(Ordering::Relaxed) {
            events_read |= VIRTIO_GPU_EVENT_DISPLAY;
        }

        let num_capsets = match self.capset_mask {
            0 => {
                match self.rutabaga_component {
                    RutabagaComponentType::Rutabaga2D => 0,
                    _ => {
                        #[allow(unused_mut)]
                        let mut num_capsets = 0;

                        // Three capsets for virgl_renderer
                        #[cfg(feature = "virgl_renderer")]
                        {
                            num_capsets += 3;
                        }

                        // One capset for gfxstream
                        #[cfg(feature = "gfxstream")]
                        {
                            num_capsets += 1;
                        }

                        num_capsets
                    }
                }
            }
            _ => self.capset_mask.count_ones(),
        };

        virtio_gpu_config {
            events_read: Le32::from(events_read),
            events_clear: Le32::from(0),
            num_scanouts: Le32::from(VIRTIO_GPU_MAX_SCANOUTS as u32),
            num_capsets: Le32::from(num_capsets),
        }
    }

    /// Sends a request to the VMM to exit the process.
    pub fn send_exit_evt(&self) -> anyhow::Result<()> {
        self.exit_evt_wrtube
            .send::<VmEventType>(&VmEventType::Exit)
            .context("failed to send exit event")
    }
}

1639 impl VirtioDevice for Gpu {
keep_rds(&self) -> Vec<RawDescriptor>1640 fn keep_rds(&self) -> Vec<RawDescriptor> {
1641 let mut keep_rds = Vec::new();
1642
1643 // To find the RawDescriptor associated with stdout and stderr on Windows is difficult.
1644 // Resource bridges are used only for Wayland displays. There is also no meaningful way
1645 // casting the underlying DMA buffer wrapped in File to a copyable RawDescriptor.
1646 // TODO(davidriley): Remove once virgl has another path to include
1647 // debugging logs.
1648 #[cfg(any(target_os = "android", target_os = "linux"))]
1649 if cfg!(debug_assertions) {
1650 keep_rds.push(libc::STDOUT_FILENO);
1651 keep_rds.push(libc::STDERR_FILENO);
1652 }
1653
1654 if let Some(ref mapper) = *self.mapper.lock() {
1655 if let Some(descriptor) = mapper.as_raw_descriptor() {
1656 keep_rds.push(descriptor);
1657 }
1658 }
1659
1660 if let Some(ref rutabaga_server_descriptor) = self.rutabaga_server_descriptor {
1661 keep_rds.push(rutabaga_server_descriptor.as_raw_descriptor());
1662 }
1663
1664 keep_rds.push(self.exit_evt_wrtube.as_raw_descriptor());
1665
1666 if let Some(gpu_control_tube) = &self.gpu_control_tube {
1667 keep_rds.push(gpu_control_tube.as_raw_descriptor());
1668 }
1669
1670 if let Some(resource_bridges) = &self.resource_bridges {
1671 resource_bridges.append_raw_descriptors(&mut keep_rds);
1672 }
1673
1674 for event_device in self.event_devices.iter().flatten() {
1675 keep_rds.push(event_device.as_raw_descriptor());
1676 }
1677
1678 keep_rds
1679 }
1680
device_type(&self) -> DeviceType1681 fn device_type(&self) -> DeviceType {
1682 DeviceType::Gpu
1683 }
1684
queue_max_sizes(&self) -> &[u16]1685 fn queue_max_sizes(&self) -> &[u16] {
1686 QUEUE_SIZES
1687 }
1688
features(&self) -> u641689 fn features(&self) -> u64 {
1690 let mut virtio_gpu_features = 1 << VIRTIO_GPU_F_EDID;
1691
1692 // If a non-2D component is specified, enable 3D features. It is possible to run display
1693 // contexts without 3D backend (i.e, gfxstream / virglrender), so check for that too.
1694 if self.rutabaga_component != RutabagaComponentType::Rutabaga2D || self.capset_mask != 0 {
1695 virtio_gpu_features |= 1 << VIRTIO_GPU_F_VIRGL
1696 | 1 << VIRTIO_GPU_F_RESOURCE_UUID
1697 | 1 << VIRTIO_GPU_F_RESOURCE_BLOB
1698 | 1 << VIRTIO_GPU_F_CONTEXT_INIT
1699 | 1 << VIRTIO_GPU_F_EDID;
1700
1701 if self.udmabuf {
1702 virtio_gpu_features |= 1 << VIRTIO_GPU_F_CREATE_GUEST_HANDLE;
1703 }
1704
1705 // New experimental/unstable feature, not upstreamed.
1706 // Safe to enable because guest must explicitly opt-in.
1707 virtio_gpu_features |= 1 << VIRTIO_GPU_F_FENCE_PASSING;
1708 }
1709
1710 self.base_features | virtio_gpu_features
1711 }
1712
ack_features(&mut self, value: u64)1713 fn ack_features(&mut self, value: u64) {
1714 let _ = value;
1715 }
1716
read_config(&self, offset: u64, data: &mut [u8])1717 fn read_config(&self, offset: u64, data: &mut [u8]) {
1718 copy_config(data, 0, self.get_config().as_bytes(), offset);
1719 }
1720
    fn write_config(&mut self, offset: u64, data: &[u8]) {
        let mut cfg = self.get_config();
        copy_config(cfg.as_bytes_mut(), offset, data, 0);
        // Writing VIRTIO_GPU_EVENT_DISPLAY to events_clear acknowledges a pending
        // display event, so drop the corresponding flag.
        if (cfg.events_clear.to_native() & VIRTIO_GPU_EVENT_DISPLAY) != 0 {
            self.display_event.store(false, Ordering::Relaxed);
        }
    }

    fn on_device_sandboxed(&mut self) {
        // Unlike most virtio devices, which start their worker thread in activate(),
        // the GPU's worker thread is started earlier here so that rutabaga and the
        // underlying render server have a chance to initialize before the guest OS
        // starts. This is needed because the virtio-gpu kernel module has a timeout
        // for some calls during initialization, and some host GPU drivers have been
        // observed to be extremely slow to initialize on fresh GCE instances. The
        // entire worker thread is started here (as opposed to just initializing
        // rutabaga and the underlying render server) because OpenGL-based renderers
        // may expect to be initialized on the same thread that later processes
        // commands.
        self.start_worker_thread();
    }

    fn activate(
        &mut self,
        mem: GuestMemory,
        interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            return Err(anyhow!(
                "expected {} queues, got {}",
                QUEUE_SIZES.len(),
                queues.len()
            ));
        }

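        // Queue 0 carries virtio-gpu control commands; queue 1 carries cursor commands
        // (matching the order of QUEUE_SIZES at the top of this file).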
        let ctrl_queue = SharedQueueReader::new(queues.remove(&0).unwrap(), interrupt.clone());
        let cursor_queue = LocalQueueReader::new(queues.remove(&1).unwrap(), interrupt.clone());

        // The worker thread was already started in on_device_sandboxed(); hand it the
        // activation resources it is waiting for.
        self.worker_thread
            .as_mut()
            .expect("worker thread missing on activate")
            .0
            .send(GpuActivationResources {
                mem,
                interrupt,
                ctrl_queue,
                cursor_queue,
                worker_snapshot: self.worker_snapshot.take(),
            })
            .expect("failed to send activation resources to worker thread");

        Ok(())
    }

    fn pci_address(&self) -> Option<PciAddress> {
        self.pci_address
    }

    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: VIRTIO_GPU_SHM_ID_HOST_VISIBLE,
            length: self.pci_bar_size,
        })
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.mapper.lock().replace(mapper);
    }

    fn expose_shmem_descriptors_with_viommu(&self) -> bool {
        // TODO(b/323368701): integrate with fixed_blob_mapping so this can always return true.
        !self.fixed_blob_mapping
    }

    // Notes on sleep/wake/snapshot/restore functionality.
    //
    // * Only 2d mode is supported so far.
    // * We only snapshot the state relevant to the virtio-gpu 2d mode protocol (i.e. scanouts,
    //   resources, fences).
    // * The GpuDisplay is recreated from scratch; we don't want to snapshot the state of a
    //   Wayland socket (for example).
    // * No state about pending virtio requests needs to be snapshotted, because the 2d backend
    //   completes them synchronously.
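    //
    // A rough sketch of the expected call sequence (assumed from the semantics above,
    // not a verbatim excerpt of the device framework):
    //
    //   let queues = gpu.virtio_sleep()?;                  // stops the worker, returns queues
    //   let snap = gpu.virtio_snapshot()?;                 // serializes the 2d state to JSON
    //   ...
    //   gpu.virtio_restore(snap)?;                         // later, on the restore path
    //   gpu.virtio_wake(Some((mem, interrupt, queues)))?;  // restarts the worker via activate()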

    fn virtio_sleep(&mut self) -> anyhow::Result<Option<BTreeMap<usize, Queue>>> {
        if let Some((activate_tx, worker_thread)) = self.worker_thread.take() {
            self.sleep_requested.store(true, Ordering::SeqCst);
            // Dropping the activation sender lets a worker that is still waiting for
            // activation resources observe the shutdown instead of blocking forever.
            drop(activate_tx);
            let WorkerReturn {
                gpu_control_tube,
                resource_bridges,
                event_devices,
                activated_state,
            } = worker_thread.stop();
            self.sleep_requested.store(false, Ordering::SeqCst);

            self.resource_bridges = Some(resource_bridges);
            self.gpu_control_tube = Some(gpu_control_tube);
            self.event_devices = Some(event_devices);

            match activated_state {
                Some((queues, worker_snapshot)) => {
                    self.worker_snapshot = Some(worker_snapshot);
                    return Ok(Some(queues.into_iter().enumerate().collect()));
                }
                // Device not activated yet.
                None => {
                    self.worker_snapshot = None;
                    return Ok(None);
                }
            }
        }
        Ok(None)
    }

    fn virtio_wake(
        &mut self,
        queues_state: Option<(GuestMemory, Interrupt, BTreeMap<usize, Queue>)>,
    ) -> anyhow::Result<()> {
        match queues_state {
            None => Ok(()),
            Some((mem, interrupt, queues)) => {
                assert!(self.worker_thread.is_none());
                self.start_worker_thread();
                // TODO(khei): activate is just what we want at the moment, but we should
                // probably move it into a "start workers" function to make it obvious that
                // it isn't strictly used for activate events.
                self.activate(mem, interrupt, queues)?;
                Ok(())
            }
        }
    }

    fn virtio_snapshot(&mut self) -> anyhow::Result<serde_json::Value> {
        Ok(serde_json::to_value(&self.worker_snapshot)?)
    }

    fn virtio_restore(&mut self, data: serde_json::Value) -> anyhow::Result<()> {
        self.worker_snapshot = serde_json::from_value(data)?;
        Ok(())
    }
}

/// This struct takes ownership of the resource bridges and tracks which ones should be
/// processed.
struct ResourceBridges {
    resource_bridges: Vec<Tube>,
    should_process: Vec<bool>,
}

impl ResourceBridges {
    pub fn new(resource_bridges: Vec<Tube>) -> Self {
        #[cfg(windows)]
        assert!(
            resource_bridges.is_empty(),
            "resource bridges are not supported on Windows"
        );

        let mut resource_bridges = Self {
            resource_bridges,
            should_process: Default::default(),
        };
        resource_bridges.reset_should_process();
        resource_bridges
    }

    /// Appends the raw descriptors of all resource bridges to the given vector.
    pub fn append_raw_descriptors(&self, rds: &mut Vec<RawDescriptor>) {
        for bridge in &self.resource_bridges {
            rds.push(bridge.as_raw_descriptor());
        }
    }

    /// Adds all resource bridges to the given WaitContext.
    pub fn add_to_wait_context(&self, wait_ctx: &mut WaitContext<WorkerToken>) {
        for (index, bridge) in self.resource_bridges.iter().enumerate() {
            if let Err(e) = wait_ctx.add(bridge, WorkerToken::ResourceBridge { index }) {
                error!("failed to add resource bridge to WaitContext: {}", e);
            }
        }
    }

    /// Marks that the resource bridge at the given index should be processed when
    /// `process_resource_bridges()` is called.
    pub fn set_should_process(&mut self, index: usize) {
        self.should_process[index] = true;
    }

    /// Processes all resource bridges that have been marked for processing. The markings are
    /// cleared before returning. Faulty resource bridges are removed from the WaitContext.
    pub fn process_resource_bridges(
        &mut self,
        state: &mut Frontend,
        wait_ctx: &mut WaitContext<WorkerToken>,
    ) {
        for (bridge, &should_process) in self.resource_bridges.iter().zip(&self.should_process) {
            if should_process {
                if let Err(e) = state.process_resource_bridge(bridge) {
                    error!("Failed to process resource bridge: {:#}", e);
                    error!("Removing that resource bridge from the wait context.");
                    wait_ctx.delete(bridge).unwrap_or_else(|e| {
                        error!("Failed to remove faulty resource bridge: {:#}", e)
                    });
                }
            }
        }
        self.reset_should_process();
    }

    fn reset_should_process(&mut self) {
        self.should_process.clear();
        self.should_process
            .resize(self.resource_bridges.len(), false);
    }
}
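
// Typical worker-loop usage of ResourceBridges (a sketch, assuming a WaitContext
// dispatching WorkerToken events; not a verbatim excerpt of the worker):
//
//   resource_bridges.add_to_wait_context(&mut wait_ctx);
//   ...
//   match token {
//       WorkerToken::ResourceBridge { index } => resource_bridges.set_should_process(index),
//       ...
//   }
//   // After draining all events for this iteration:
//   resource_bridges.process_resource_bridges(&mut state, &mut wait_ctx);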