// Copyright 2022 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::cell::RefCell;
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;

use anyhow::bail;
use anyhow::Context;
use argh::FromArgs;
use base::error;
use base::info;
use base::Event;
use base::FromRawDescriptor;
use base::RawDescriptor;
use base::SafeDescriptor;
use base::SendTube;
use base::StreamChannel;
use base::Tube;
use broker_ipc::common_child_setup;
use broker_ipc::CommonChildStartupArgs;
use cros_async::AsyncTube;
use cros_async::AsyncWrapper;
use cros_async::EventAsync;
use cros_async::Executor;
use gpu_display::EventDevice;
use gpu_display::WindowProcedureThread;
use gpu_display::WindowProcedureThreadBuilder;
use hypervisor::ProtectionType;
use serde::Deserialize;
use serde::Serialize;
use sync::Mutex;
use tube_transporter::TubeToken;
use vm_control::gpu::GpuControlCommand;
use vm_control::gpu::GpuControlResult;

use crate::virtio;
use crate::virtio::gpu;
use crate::virtio::gpu::ProcessDisplayResult;
use crate::virtio::vhost::user::device::gpu::GpuBackend;
use crate::virtio::vhost::user::device::handler::sys::windows::read_from_tube_transporter;
use crate::virtio::vhost::user::device::handler::sys::windows::run_handler;
use crate::virtio::vhost::user::device::handler::DeviceRequestHandler;
use crate::virtio::Gpu;
use crate::virtio::GpuDisplayParameters;
use crate::virtio::GpuParameters;
use crate::virtio::Interrupt;

pub mod generic;
pub use generic as product;

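/// Waits for the GPU display to become readable and processes its events until an error occurs
/// or the guest requests to close the window, in which case the exit event is signaled.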
async fn run_display(
    display: EventAsync,
    state: Rc<RefCell<gpu::Frontend>>,
    gpu: Rc<RefCell<gpu::Gpu>>,
) {
    loop {
        if let Err(e) = display.next_val().await {
            error!(
                "Failed to wait for display context to become readable: {}",
                e
            );
            break;
        }

        match state.borrow_mut().process_display() {
            ProcessDisplayResult::Error(e) => {
                error!("Failed to process display events: {}", e);
                break;
            }
            ProcessDisplayResult::CloseRequested => {
                let res = gpu.borrow().send_exit_evt();
                if res.is_err() {
                    error!("Failed to send exit event: {:?}", res);
                }
                break;
            }
            ProcessDisplayResult::Success => {}
        }
    }
}

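/// Receives `GpuControlCommand`s from the control tube, applies them to the GPU frontend, and
/// sends the results back. Signals a config change interrupt when the displays were updated.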
async fn run_gpu_control_command_handler(
    mut gpu_control_tube: AsyncTube,
    state: Rc<RefCell<gpu::Frontend>>,
    interrupt: Interrupt,
) {
    'wait: loop {
        let req = match gpu_control_tube.next::<GpuControlCommand>().await {
            Ok(req) => req,
            Err(e) => {
                error!("GPU control socket failed to recv: {:?}", e);
                break 'wait;
            }
        };

        let resp = state.borrow_mut().process_gpu_control_command(req);

        if let GpuControlResult::DisplaysUpdated = resp {
            info!("Signaling display config change");
            interrupt.signal_config_changed();
        }

        if let Err(e) = gpu_control_tube.send(resp).await {
            error!("Display control socket failed to send: {}", e);
            break 'wait;
        }
    }
}

impl GpuBackend {
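    /// Starts the Windows-specific worker tasks (display event handling and GPU control command
    /// handling) on the backend's executor.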
    pub fn start_platform_workers(&mut self, interrupt: Interrupt) -> anyhow::Result<()> {
        let state = self
            .state
            .as_ref()
            .context("frontend state wasn't set")?
            .clone();

        // Start handling the display.
        // SAFETY:
        // Safe because the raw descriptor is valid, and an event.
        let display = unsafe {
            EventAsync::clone_raw_without_reset(&*state.borrow_mut().display().borrow(), &self.ex)
        }
        .context("failed to clone inner WaitContext for gpu display")?;

        let task = self
            .ex
            .spawn_local(run_display(display, state.clone(), self.gpu.clone()));
        self.platform_workers.borrow_mut().push(task);

        let task = self.ex.spawn_local(run_gpu_control_command_handler(
            AsyncTube::new(
                &self.ex,
                self.gpu
                    .borrow_mut()
                    .gpu_control_tube
                    .take()
                    .expect("gpu control tube must exist"),
            )
            .expect("gpu control tube creation"),
            state,
            interrupt,
        ));
        self.platform_workers.borrow_mut().push(task);

        Ok(())
    }
}

#[derive(FromArgs)]
/// GPU device
#[argh(subcommand, name = "gpu", description = "")]
pub struct Options {
    #[argh(
        option,
        description = "pipe handle end for Tube Transporter",
        arg_name = "HANDLE"
    )]
    bootstrap: usize,
}

/// Main process end for input event devices.
#[derive(Deserialize, Serialize)]
pub struct InputEventVmmConfig {
    // Pipes to receive input events on.
    pub multi_touch_pipes: Vec<StreamChannel>,
    pub mouse_pipes: Vec<StreamChannel>,
    pub keyboard_pipes: Vec<StreamChannel>,
}

/// Backend process end for input event devices.
#[derive(Deserialize, Serialize)]
pub struct InputEventBackendConfig {
    // Event devices to send input events to.
    pub event_devices: Vec<EventDevice>,
}

/// Configuration for running input event devices, split by a part sent to the main VMM and a part
/// sent to the window thread (either main process or a vhost-user process).
#[derive(Deserialize, Serialize)]
pub struct InputEventSplitConfig {
    // Config sent to the backend.
    pub backend_config: Option<InputEventBackendConfig>,
    // Config sent to the main process.
    pub vmm_config: InputEventVmmConfig,
}

/// Main process end for a GPU device.
#[derive(Deserialize, Serialize)]
pub struct GpuVmmConfig {
    // Tube for setting up the vhost-user connection. May not exist if not using vhost-user.
    pub main_vhost_user_tube: Option<Tube>,
    // A tube to forward GPU control commands in the main process.
    pub gpu_control_host_tube: Option<Tube>,
    pub product_config: product::GpuVmmConfig,
}

/// Config arguments passed through the bootstrap Tube from the broker to the Gpu backend
/// process.
#[derive(Deserialize, Serialize)]
pub struct GpuBackendConfig {
    // Tube for setting up the vhost-user connection. May not exist if not using vhost-user.
    pub device_vhost_user_tube: Option<Tube>,
    // An event for an incoming exit request.
    pub exit_event: Event,
    // A tube to send an exit request.
    pub exit_evt_wrtube: SendTube,
    // A tube to handle GPU control commands in the GPU device.
    pub gpu_control_device_tube: Tube,
    // GPU parameters.
    pub params: GpuParameters,
    // Product related configurations.
    pub product_config: product::GpuBackendConfig,
}

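/// Main process end for the window procedure thread.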
#[derive(Deserialize, Serialize)]
pub struct WindowProcedureThreadVmmConfig {
    pub product_config: product::WindowProcedureThreadVmmConfig,
}

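/// Configuration for the window procedure thread, split by a part sent to the backend process
/// and a part sent to the main process.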
#[derive(Deserialize, Serialize)]
pub struct WindowProcedureThreadSplitConfig {
    // This is the config sent to the backend process.
    pub wndproc_thread_builder: Option<WindowProcedureThreadBuilder>,
    // Config sent to the main process.
    pub vmm_config: WindowProcedureThreadVmmConfig,
}

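/// Entry point of the vhost-user GPU device process: receives its configuration over the
/// bootstrap tube, starts the window procedure thread, and runs the device worker.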
pub fn run_gpu_device(opts: Options) -> anyhow::Result<()> {
    cros_tracing::init();

    let raw_transport_tube = opts.bootstrap as RawDescriptor;

    let mut tubes = read_from_tube_transporter(raw_transport_tube)?;

    let bootstrap_tube = tubes.get_tube(TubeToken::Bootstrap)?;

    let startup_args: CommonChildStartupArgs = bootstrap_tube.recv::<CommonChildStartupArgs>()?;
    let _child_cleanup = common_child_setup(startup_args)?;

    let (mut config, input_event_backend_config, wndproc_thread_builder): (
        GpuBackendConfig,
        InputEventBackendConfig,
        WindowProcedureThreadBuilder,
    ) = bootstrap_tube
        .recv()
        .context("failed to parse GPU backend config from bootstrap tube")?;

    // TODO(b/213170185): Uncomment once sandbox is upstreamed.
    // if sandbox::is_sandbox_target() {
    //     sandbox::TargetServices::get()
    //         .expect("failed to get target services")
    //         .unwrap()
    //         .lower_token();
    // }

    let wndproc_thread = wndproc_thread_builder
        .start_thread()
        .context("Failed to create window procedure thread for vhost GPU")?;

    run_gpu_device_worker(
        config,
        input_event_backend_config.event_devices,
        wndproc_thread,
    )
}

/// Run the GPU device worker.
pub fn run_gpu_device_worker(
    mut config: GpuBackendConfig,
    event_devices: Vec<EventDevice>,
    wndproc_thread: WindowProcedureThread,
) -> anyhow::Result<()> {
    let vhost_user_tube = config
        .device_vhost_user_tube
        .expect("vhost-user gpu tube must be set");

    if config.params.display_params.is_empty() {
        config
            .params
            .display_params
            .push(GpuDisplayParameters::default());
    }

    let display_backends = vec![virtio::DisplayBackend::WinApi];

    let mut gpu_params = config.params.clone();

    // Fallback for when external_blob is not available on the machine. Currently always off.
    gpu_params.system_blob = false;

    let base_features = virtio::base_features(ProtectionType::Unprotected);

    let gpu = Rc::new(RefCell::new(Gpu::new(
        config.exit_evt_wrtube,
        config.gpu_control_device_tube,
        /* resource_bridges= */ Vec::new(),
        display_backends,
        &gpu_params,
        /* render_server_descriptor */ None,
        event_devices,
        base_features,
        /* channels= */ &Default::default(),
        wndproc_thread,
    )));

    let ex = Executor::new().context("failed to create executor")?;

    let backend = GpuBackend {
        ex: ex.clone(),
        gpu,
        resource_bridges: Default::default(),
        acked_protocol_features: 0,
        state: None,
        fence_state: Default::default(),
        queue_workers: Default::default(),
        platform_workers: Default::default(),
        shmem_mapper: Arc::new(Mutex::new(None)),
    };

    let handler = DeviceRequestHandler::new(backend);

    info!("vhost-user gpu device ready, starting run loop...");

    // Run until the backend is finished.
    if let Err(e) = ex.run_until(run_handler(
        Box::new(handler),
        vhost_user_tube,
        config.exit_event,
        &ex,
    )) {
        bail!("error occurred: {}", e);
    }

    // Process any tasks from the backend's destructor.
    Ok(ex.run_until(async {})?)
}