1 // Copyright 2019 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! Crate for displaying simple surfaces and GPU buffers over wayland.
6
7 extern crate base;
8
9 #[path = "dwl.rs"]
10 mod dwl;
11
12 use std::cell::Cell;
13 use std::cmp::max;
14 use std::collections::HashMap;
15 use std::ffi::CStr;
16 use std::ffi::CString;
17 use std::mem::zeroed;
18 use std::panic::catch_unwind;
19 use std::path::Path;
20 use std::process::abort;
21 use std::ptr::null;
22
23 use anyhow::bail;
24 use base::error;
25 use base::round_up_to_page_size;
26 use base::AsRawDescriptor;
27 use base::MemoryMapping;
28 use base::MemoryMappingBuilder;
29 use base::RawDescriptor;
30 use base::SharedMemory;
31 use base::VolatileMemory;
32 use dwl::*;
33 use linux_input_sys::virtio_input_event;
34 use sync::Waitable;
35 use vm_control::gpu::DisplayParameters;
36
37 use crate::DisplayExternalResourceImport;
38 use crate::DisplayT;
39 use crate::EventDeviceKind;
40 use crate::FlipToExtraInfo;
41 use crate::GpuDisplayError;
42 use crate::GpuDisplayEvents;
43 use crate::GpuDisplayFramebuffer;
44 use crate::GpuDisplayResult;
45 use crate::GpuDisplaySurface;
46 use crate::SemaphoreTimepoint;
47 use crate::SurfaceType;
48 use crate::SysDisplayT;
49
// Number of framebuffers per surface. Three allows one buffer to be displayed, one queued,
// and one being drawn at any time.
const BUFFER_COUNT: usize = 3;
// All surfaces use a 4-byte-per-pixel (32-bit) format.
const BYTES_PER_PIXEL: u32 = 4;
52
53 struct DwlContext(*mut dwl_context);
54 impl Drop for DwlContext {
drop(&mut self)55 fn drop(&mut self) {
56 if !self.0.is_null() {
57 // SAFETY:
58 // Safe given that we checked the pointer for non-null and it should always be of the
59 // correct type.
60 unsafe {
61 dwl_context_destroy(&mut self.0);
62 }
63 }
64 }
65 }
66
67 impl AsRawDescriptor for DwlContext {
as_raw_descriptor(&self) -> RawDescriptor68 fn as_raw_descriptor(&self) -> RawDescriptor {
69 // SAFETY:
70 // Safe given that the context pointer is valid.
71 unsafe { dwl_context_fd(self.0) }
72 }
73 }
74
75 struct DwlDmabuf(*mut dwl_dmabuf);
76
77 impl Drop for DwlDmabuf {
drop(&mut self)78 fn drop(&mut self) {
79 if !self.0.is_null() {
80 // SAFETY:
81 // Safe given that we checked the pointer for non-null and it should always be of the
82 // correct type.
83 unsafe {
84 dwl_dmabuf_destroy(&mut self.0);
85 }
86 }
87 }
88 }
89
90 struct DwlSurface(*mut dwl_surface);
91 impl Drop for DwlSurface {
drop(&mut self)92 fn drop(&mut self) {
93 if !self.0.is_null() {
94 // SAFETY:
95 // Safe given that we checked the pointer for non-null and it should always be of the
96 // correct type.
97 unsafe {
98 dwl_surface_destroy(&mut self.0);
99 }
100 }
101 }
102 }
103
/// A single wayland surface backed by a ring of shared-memory framebuffers.
struct WaylandSurface {
    // Owning handle to the C-side surface object.
    surface: DwlSurface,
    // Length in bytes of one row of pixels (width * BYTES_PER_PIXEL).
    row_size: u32,
    // Size in bytes of ONE framebuffer within `buffer_mem`, not the whole mapping.
    buffer_size: usize,
    // Index of the buffer most recently flipped to; drawing targets the following index.
    buffer_index: Cell<usize>,
    // Mapping of the shared memory region holding all BUFFER_COUNT framebuffers.
    buffer_mem: MemoryMapping,
}
111
impl WaylandSurface {
    /// Returns the raw pointer to the underlying C-side surface object.
    fn surface(&self) -> *mut dwl_surface {
        self.surface.0
    }
}
117
118 impl GpuDisplaySurface for WaylandSurface {
surface_descriptor(&self) -> u64119 fn surface_descriptor(&self) -> u64 {
120 // SAFETY:
121 // Safe if the surface is valid.
122 let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
123 pointer as u64
124 }
125
framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>126 fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
127 let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
128 let framebuffer = self
129 .buffer_mem
130 .get_slice(buffer_index * self.buffer_size, self.buffer_size)
131 .ok()?;
132
133 Some(GpuDisplayFramebuffer::new(
134 framebuffer,
135 self.row_size,
136 BYTES_PER_PIXEL,
137 ))
138 }
139
next_buffer_in_use(&self) -> bool140 fn next_buffer_in_use(&self) -> bool {
141 let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
142 // SAFETY:
143 // Safe because only a valid surface and buffer index is used.
144 unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
145 }
146
close_requested(&self) -> bool147 fn close_requested(&self) -> bool {
148 // SAFETY:
149 // Safe because only a valid surface is used.
150 unsafe { dwl_surface_close_requested(self.surface()) }
151 }
152
flip(&mut self)153 fn flip(&mut self) {
154 self.buffer_index
155 .set((self.buffer_index.get() + 1) % BUFFER_COUNT);
156
157 // SAFETY:
158 // Safe because only a valid surface and buffer index is used.
159 unsafe {
160 dwl_surface_flip(self.surface(), self.buffer_index.get());
161 }
162 }
163
flip_to( &mut self, import_id: u32, _acquire_timepoint: Option<SemaphoreTimepoint>, _release_timepoint: Option<SemaphoreTimepoint>, _extra_info: Option<FlipToExtraInfo>, ) -> anyhow::Result<Waitable>164 fn flip_to(
165 &mut self,
166 import_id: u32,
167 _acquire_timepoint: Option<SemaphoreTimepoint>,
168 _release_timepoint: Option<SemaphoreTimepoint>,
169 _extra_info: Option<FlipToExtraInfo>,
170 ) -> anyhow::Result<Waitable> {
171 // SAFETY:
172 // Safe because only a valid surface and import_id is used.
173 unsafe { dwl_surface_flip_to(self.surface(), import_id) };
174 Ok(Waitable::signaled())
175 }
176
commit(&mut self) -> GpuDisplayResult<()>177 fn commit(&mut self) -> GpuDisplayResult<()> {
178 // SAFETY:
179 // Safe because only a valid surface is used.
180 unsafe {
181 dwl_surface_commit(self.surface());
182 }
183
184 Ok(())
185 }
186
set_position(&mut self, x: u32, y: u32)187 fn set_position(&mut self, x: u32, y: u32) {
188 // SAFETY:
189 // Safe because only a valid surface is used.
190 unsafe {
191 dwl_surface_set_position(self.surface(), x, y);
192 }
193 }
194 }
195
/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
pub struct DisplayWl {
    // Imported dmabufs currently registered with the compositor, keyed by import id.
    dmabufs: HashMap<u32, DwlDmabuf>,
    // Owning handle to the C-side compositor connection.
    // NOTE(review): fields drop in declaration order, so the dmabufs are destroyed before
    // the context — presumably intentional; confirm the C API requires this ordering.
    ctx: DwlContext,
    // Event most recently popped by `next_event`, consumed by `handle_next_event`.
    current_event: Option<dwl_event>,
    // Wrapping counter used to hand out multitouch tracking ids.
    mt_tracking_id: u16,
}
207
208 /// Error logging callback used by wrapped C implementation.
209 ///
210 /// # Safety
211 ///
212 /// safe because it must be passed a valid pointer to null-terminated c-string.
213 #[allow(clippy::unnecessary_cast)]
error_callback(message: *const ::std::os::raw::c_char)214 unsafe extern "C" fn error_callback(message: *const ::std::os::raw::c_char) {
215 catch_unwind(|| {
216 assert!(!message.is_null());
217 // SAFETY: trivially safe
218 let msg = unsafe {
219 std::str::from_utf8(std::slice::from_raw_parts(
220 message as *const u8,
221 libc::strlen(message),
222 ))
223 .unwrap()
224 };
225 error!("{}", msg);
226 })
227 .unwrap_or_else(|_| abort())
228 }
229
230 impl DisplayWl {
231 /// Opens a fresh connection to the compositor.
new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl>232 pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl> {
233 // SAFETY:
234 // The dwl_context_new call should always be safe to call, and we check its result.
235 let ctx = DwlContext(unsafe { dwl_context_new(Some(error_callback)) });
236 if ctx.0.is_null() {
237 return Err(GpuDisplayError::Allocate);
238 }
239
240 // The dwl_context_setup call is always safe to call given that the supplied context is
241 // valid. and we check its result.
242 let cstr_path = match wayland_path.map(|p| p.as_os_str().to_str()) {
243 Some(Some(s)) => match CString::new(s) {
244 Ok(cstr) => Some(cstr),
245 Err(_) => return Err(GpuDisplayError::InvalidPath),
246 },
247 Some(None) => return Err(GpuDisplayError::InvalidPath),
248 None => None,
249 };
250 // This grabs a pointer to cstr_path without moving the CString into the .map closure
251 // accidentally, which triggeres a really hard to catch use after free in
252 // dwl_context_setup.
253 let cstr_path_ptr = cstr_path
254 .as_ref()
255 .map(|s: &CString| CStr::as_ptr(s))
256 .unwrap_or(null());
257 // SAFETY: args are valid and the return value is checked.
258 let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) };
259 if !setup_success {
260 return Err(GpuDisplayError::Connect);
261 }
262
263 Ok(DisplayWl {
264 dmabufs: HashMap::new(),
265 ctx,
266 current_event: None,
267 mt_tracking_id: 0u16,
268 })
269 }
270
ctx(&self) -> *mut dwl_context271 fn ctx(&self) -> *mut dwl_context {
272 self.ctx.0
273 }
274
pop_event(&self) -> dwl_event275 fn pop_event(&self) -> dwl_event {
276 // SAFETY:
277 // Safe because dwl_next_events from a context's circular buffer.
278 unsafe {
279 let mut ev = zeroed();
280 dwl_context_next_event(self.ctx(), &mut ev);
281 ev
282 }
283 }
284
next_tracking_id(&mut self) -> i32285 fn next_tracking_id(&mut self) -> i32 {
286 let cur_id: i32 = self.mt_tracking_id as i32;
287 self.mt_tracking_id = self.mt_tracking_id.wrapping_add(1);
288 cur_id
289 }
290
current_tracking_id(&self) -> i32291 fn current_tracking_id(&self) -> i32 {
292 self.mt_tracking_id as i32
293 }
294 }
295
impl DisplayT for DisplayWl {
    fn pending_events(&self) -> bool {
        // SAFETY:
        // Safe because the function just queries the values of two variables in a context.
        unsafe { dwl_context_pending_events(self.ctx()) }
    }

    fn next_event(&mut self) -> GpuDisplayResult<u64> {
        // Stash the popped event so `handle_next_event` can translate it, and hand back the
        // descriptor of the surface the event targets so the common layer can route it.
        let ev = self.pop_event();
        let descriptor = ev.surface_descriptor as u64;
        self.current_event = Some(ev);
        Ok(descriptor)
    }

    // Translates the event stashed by `next_event` into guest virtio input events, if any.
    fn handle_next_event(
        &mut self,
        _surface: &mut Box<dyn GpuDisplaySurface>,
    ) -> Option<GpuDisplayEvents> {
        // Should not panic since the common layer only calls this when an event occurs.
        let event = self.current_event.take().unwrap();

        match event.event_type {
            // Keyboard focus changes produce no guest-visible input events.
            DWL_EVENT_TYPE_KEYBOARD_ENTER => None,
            DWL_EVENT_TYPE_KEYBOARD_LEAVE => None,
            DWL_EVENT_TYPE_KEYBOARD_KEY => {
                let linux_keycode = event.params[0] as u16;
                let pressed = event.params[1] == DWL_KEYBOARD_KEY_STATE_PRESSED;
                let events = vec![virtio_input_event::key(linux_keycode, pressed, false)];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Keyboard,
                })
            }
            // TODO(tutankhamen): slot is always 0, because all the input
            // events come from mouse device, i.e. only one touch is possible at a time.
            // Full MT protocol has to be implemented and properly wired later.
            DWL_EVENT_TYPE_TOUCH_DOWN | DWL_EVENT_TYPE_TOUCH_MOTION => {
                // A new touch-down gets a fresh tracking id; motion reuses the current one.
                let tracking_id = if event.event_type == DWL_EVENT_TYPE_TOUCH_DOWN {
                    self.next_tracking_id()
                } else {
                    self.current_tracking_id()
                };

                // Negative coordinates are clamped to 0 before being forwarded.
                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(tracking_id),
                    virtio_input_event::multitouch_absolute_x(max(0, event.params[0])),
                    virtio_input_event::multitouch_absolute_y(max(0, event.params[1])),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            DWL_EVENT_TYPE_TOUCH_UP => {
                // Tracking id -1 marks the touch contact as released.
                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(-1),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            _ => {
                error!("unknown event type {}", event.event_type);
                None
            }
        }
    }

    fn flush(&self) {
        // SAFETY:
        // Safe given that the context pointer is valid.
        unsafe {
            dwl_context_dispatch(self.ctx());
        }
    }

    // Creates a surface backed by BUFFER_COUNT shared-memory framebuffers and registers it
    // with the compositor.
    fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        surface_id: u32,
        scanout_id: Option<u32>,
        display_params: &DisplayParameters,
        surf_type: SurfaceType,
    ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
        // Parent id 0 means "no parent" on the C side.
        let parent_id = parent_surface_id.unwrap_or(0);

        let (width, height) = display_params.get_virtual_display_size();
        let row_size = width * BYTES_PER_PIXEL;
        let fb_size = row_size * height;
        // One shared-memory region holds all BUFFER_COUNT framebuffers back to back.
        let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
        let buffer_shm = SharedMemory::new("GpuDisplaySurface", buffer_size as u64)?;
        let buffer_mem = MemoryMappingBuilder::new(buffer_size)
            .from_shared_memory(&buffer_shm)
            .build()
            .unwrap();

        // Cursor surfaces blend with alpha; scanout surfaces are the ones receiving input.
        let dwl_surf_flags = match surf_type {
            SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA,
            SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT,
        };
        // SAFETY:
        // Safe because only a valid context, parent ID (if not non-zero), and buffer FD are used.
        // The returned surface is checked for validity before being filed away.
        let surface = DwlSurface(unsafe {
            dwl_context_surface_new(
                self.ctx(),
                parent_id,
                surface_id,
                buffer_shm.as_raw_descriptor(),
                buffer_size,
                fb_size as usize,
                width,
                height,
                row_size,
                dwl_surf_flags,
            )
        });

        if surface.0.is_null() {
            return Err(GpuDisplayError::CreateSurface);
        }

        if let Some(scanout_id) = scanout_id {
            // SAFETY:
            // Safe because only a valid surface is used.
            unsafe {
                dwl_surface_set_scanout_id(surface.0, scanout_id);
            }
        }

        Ok(Box::new(WaylandSurface {
            surface,
            row_size,
            // Per-buffer size (fb_size), not the size of the whole mapping.
            buffer_size: fb_size as usize,
            buffer_index: Cell::new(0),
            buffer_mem,
        }))
    }

    // Imports an external buffer so it can later be presented via `flip_to`.
    fn import_resource(
        &mut self,
        import_id: u32,
        _surface_id: u32,
        external_display_resource: DisplayExternalResourceImport,
    ) -> anyhow::Result<()> {
        // This let pattern is always true if the vulkan_display feature is disabled.
        #[allow(irrefutable_let_patterns)]
        if let DisplayExternalResourceImport::Dmabuf {
            descriptor,
            offset,
            stride,
            modifiers,
            width,
            height,
            fourcc,
        } = external_display_resource
        {
            // SAFETY:
            // Safe given that the context pointer is valid. Any other invalid parameters would be
            // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is
            // valid before filing it away.
            let dmabuf = DwlDmabuf(unsafe {
                dwl_context_dmabuf_new(
                    self.ctx(),
                    import_id,
                    descriptor.as_raw_descriptor(),
                    offset,
                    stride,
                    modifiers,
                    width,
                    height,
                    fourcc,
                )
            });

            if dmabuf.0.is_null() {
                bail!("dmabuf import failed.");
            }

            // Keep the dmabuf alive until `release_import` removes it.
            self.dmabufs.insert(import_id, dmabuf);

            Ok(())
        } else {
            bail!("gpu_display_wl only supports Dmabuf imports");
        }
    }

    fn release_import(&mut self, _surface_id: u32, import_id: u32) {
        // Dropping the removed DwlDmabuf destroys the compositor-side object.
        self.dmabufs.remove(&import_id);
    }
}
490
491 impl SysDisplayT for DisplayWl {}
492
493 impl AsRawDescriptor for DisplayWl {
as_raw_descriptor(&self) -> RawDescriptor494 fn as_raw_descriptor(&self) -> RawDescriptor {
495 // Safe given that the context pointer is valid.
496 self.ctx.as_raw_descriptor()
497 }
498 }
499