// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

5 use std::cell::RefCell;
6 use std::collections::BTreeMap;
7 use std::path::PathBuf;
8 use std::rc::Rc;
9 use std::sync::Arc;
10 
11 use anyhow::Context;
12 use argh::FromArgs;
13 use base::clone_descriptor;
14 use base::error;
15 use base::SafeDescriptor;
16 use base::Tube;
17 use base::UnixSeqpacketListener;
18 use base::UnlinkUnixSeqpacketListener;
19 use cros_async::AsyncWrapper;
20 use cros_async::Executor;
21 use cros_async::IoSource;
22 use hypervisor::ProtectionType;
23 use sync::Mutex;
24 
25 use crate::virtio;
26 use crate::virtio::gpu;
27 use crate::virtio::gpu::ProcessDisplayResult;
28 use crate::virtio::vhost::user::device::gpu::GpuBackend;
29 use crate::virtio::vhost::user::device::listener::sys::VhostUserListener;
30 use crate::virtio::vhost::user::device::listener::VhostUserListenerTrait;
31 use crate::virtio::vhost::user::device::wl::parse_wayland_sock;
32 use crate::virtio::Gpu;
33 use crate::virtio::GpuDisplayParameters;
34 use crate::virtio::GpuParameters;
35 use crate::virtio::Interrupt;
36 
run_display( display: IoSource<AsyncWrapper<SafeDescriptor>>, state: Rc<RefCell<gpu::Frontend>>, )37 async fn run_display(
38     display: IoSource<AsyncWrapper<SafeDescriptor>>,
39     state: Rc<RefCell<gpu::Frontend>>,
40 ) {
41     loop {
42         if let Err(e) = display.wait_readable().await {
43             error!(
44                 "Failed to wait for display context to become readable: {}",
45                 e
46             );
47             break;
48         }
49 
50         match state.borrow_mut().process_display() {
51             ProcessDisplayResult::Error(e) => {
52                 error!("Failed to process display events: {}", e);
53                 break;
54             }
55             ProcessDisplayResult::CloseRequested => break,
56             ProcessDisplayResult::Success => {}
57         }
58     }
59 }
60 
run_resource_bridge(tube: IoSource<Tube>, state: Rc<RefCell<gpu::Frontend>>)61 async fn run_resource_bridge(tube: IoSource<Tube>, state: Rc<RefCell<gpu::Frontend>>) {
62     loop {
63         if let Err(e) = tube.wait_readable().await {
64             error!(
65                 "Failed to wait for resource bridge tube to become readable: {}",
66                 e
67             );
68             break;
69         }
70 
71         if let Err(e) = state.borrow_mut().process_resource_bridge(tube.as_source()) {
72             error!("Failed to process resource bridge: {:#}", e);
73             break;
74         }
75     }
76 }
77 
78 impl GpuBackend {
start_platform_workers(&mut self, _interrupt: Interrupt) -> anyhow::Result<()>79     pub fn start_platform_workers(&mut self, _interrupt: Interrupt) -> anyhow::Result<()> {
80         let state = self
81             .state
82             .as_ref()
83             .context("frontend state wasn't set")?
84             .clone();
85 
86         // Start handling the resource bridges.
87         for bridge in self.resource_bridges.lock().drain(..) {
88             let tube = self
89                 .ex
90                 .async_from(bridge)
91                 .context("failed to create async tube")?;
92             let task = self
93                 .ex
94                 .spawn_local(run_resource_bridge(tube, state.clone()));
95             self.platform_workers.borrow_mut().push(task);
96         }
97 
98         // Start handling the display.
99         let display = clone_descriptor(&*state.borrow_mut().display().borrow())
100             .map(AsyncWrapper::new)
101             .context("failed to clone inner WaitContext for gpu display")
102             .and_then(|ctx| {
103                 self.ex
104                     .async_from(ctx)
105                     .context("failed to create async WaitContext")
106             })?;
107 
108         let task = self.ex.spawn_local(run_display(display, state));
109         self.platform_workers.borrow_mut().push(task);
110 
111         Ok(())
112     }
113 }
gpu_parameters_from_str(input: &str) -> Result<GpuParameters, String>114 fn gpu_parameters_from_str(input: &str) -> Result<GpuParameters, String> {
115     serde_json::from_str(input).map_err(|e| e.to_string())
116 }
117 
// Command-line options for the standalone vhost-user GPU device process.
// NOTE: the `///` doc comments on the fields below double as the help text
// that `argh` prints to the user, so they are left exactly as-is.
#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu")]
pub struct Options {
    #[argh(option, arg_name = "PATH")]
    /// path to bind a listening vhost-user socket
    socket: String,
    #[argh(option, from_str_fn(parse_wayland_sock), arg_name = "PATH[,name=NAME]")]
    /// path to one or more Wayland sockets. The unnamed socket is
    /// used for displaying virtual screens while the named ones are used for IPC
    wayland_sock: Vec<(String, PathBuf)>,
    #[argh(option, arg_name = "PATH")]
    /// path to one or more bridge sockets for communicating with
    /// other graphics devices (wayland, video, etc)
    resource_bridge: Vec<String>,
    #[argh(option, arg_name = "DISPLAY")]
    /// X11 display name to use
    x_display: Option<String>,
    // Parsed from JSON via `gpu_parameters_from_str`; defaults to
    // `GpuParameters::default()` when the flag is omitted.
    #[argh(
        option,
        from_str_fn(gpu_parameters_from_str),
        default = "Default::default()",
        arg_name = "JSON"
    )]
    /// a JSON object of virtio-gpu parameters
    params: GpuParameters,
}
145 
run_gpu_device(opts: Options) -> anyhow::Result<()>146 pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
147     let Options {
148         x_display,
149         params: mut gpu_parameters,
150         resource_bridge,
151         socket,
152         wayland_sock,
153     } = opts;
154 
155     let channels: BTreeMap<_, _> = wayland_sock.into_iter().collect();
156 
157     let resource_bridge_listeners = resource_bridge
158         .into_iter()
159         .map(|p| {
160             UnixSeqpacketListener::bind(&p)
161                 .map(UnlinkUnixSeqpacketListener)
162                 .with_context(|| format!("failed to bind socket at path {}", p))
163         })
164         .collect::<anyhow::Result<Vec<_>>>()?;
165 
166     if gpu_parameters.display_params.is_empty() {
167         gpu_parameters
168             .display_params
169             .push(GpuDisplayParameters::default());
170     }
171 
172     let ex = Executor::new().context("failed to create executor")?;
173 
174     // We don't know the order in which other devices are going to connect to the resource bridges
175     // so start listening for all of them on separate threads. Any devices that connect after the
176     // gpu device starts its queues will not have its resource bridges processed. In practice this
177     // should be fine since the devices that use the resource bridge always try to connect to the
178     // gpu device before handling messages from the VM.
179     let resource_bridges = Arc::new(Mutex::new(Vec::with_capacity(
180         resource_bridge_listeners.len(),
181     )));
182     for listener in resource_bridge_listeners {
183         let resource_bridges = Arc::clone(&resource_bridges);
184         ex.spawn_blocking(move || match listener.accept() {
185             Ok(stream) => resource_bridges
186                 .lock()
187                 .push(Tube::new_from_unix_seqpacket(stream).unwrap()),
188             Err(e) => {
189                 let path = listener
190                     .path()
191                     .unwrap_or_else(|_| PathBuf::from("{unknown}"));
192                 error!(
193                     "Failed to accept resource bridge connection for socket {}: {}",
194                     path.display(),
195                     e
196                 );
197             }
198         })
199         .detach();
200     }
201 
202     // TODO(b/232344535): Read side of the tube is ignored currently.
203     // Complete the implementation by polling `exit_evt_rdtube` and
204     // kill the sibling VM.
205     let (exit_evt_wrtube, _) =
206         Tube::directional_pair().context("failed to create vm event tube")?;
207 
208     let (gpu_control_tube, _) = Tube::pair().context("failed to create gpu control tube")?;
209 
210     let mut display_backends = vec![
211         virtio::DisplayBackend::X(x_display),
212         virtio::DisplayBackend::Stub,
213     ];
214     if let Some(p) = channels.get("") {
215         display_backends.insert(0, virtio::DisplayBackend::Wayland(Some(p.to_owned())));
216     }
217 
218     // These are only used when there is an input device.
219     let event_devices = Vec::new();
220 
221     let base_features = virtio::base_features(ProtectionType::Unprotected);
222 
223     let listener = VhostUserListener::new_socket(&socket, None)?;
224 
225     let gpu = Rc::new(RefCell::new(Gpu::new(
226         exit_evt_wrtube,
227         gpu_control_tube,
228         Vec::new(), // resource_bridges, handled separately by us
229         display_backends,
230         &gpu_parameters,
231         /* rutabaga_server_descriptor */
232         None,
233         event_devices,
234         base_features,
235         &channels,
236         /* gpu_cgroup_path */
237         None,
238     )));
239 
240     let backend = GpuBackend {
241         ex: ex.clone(),
242         gpu,
243         resource_bridges,
244         acked_protocol_features: 0,
245         state: None,
246         fence_state: Default::default(),
247         queue_workers: Default::default(),
248         platform_workers: Default::default(),
249         shmem_mapper: Arc::new(Mutex::new(None)),
250     };
251 
252     // Run until the backend is finished.
253     let _ = ex.run_until(listener.run_backend(backend, &ex))?;
254 
255     // Process any tasks from the backend's destructor.
256     Ok(ex.run_until(async {})?)
257 }
258