// Copyright 2019 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Crate for displaying simple surfaces and GPU buffers over wayland.

extern crate base;

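// FFI declarations for the wrapped C Wayland implementation (see dwl.rs).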
#[path = "dwl.rs"]
#[allow(dead_code)]
mod dwl;

use std::cell::Cell;
use std::cmp::max;
use std::collections::HashMap;
use std::ffi::CStr;
use std::ffi::CString;
use std::mem::zeroed;
use std::panic::catch_unwind;
use std::path::Path;
use std::process::abort;
use std::ptr::null;

use anyhow::bail;
use base::error;
use base::round_up_to_page_size;
use base::AsRawDescriptor;
use base::MemoryMapping;
use base::MemoryMappingBuilder;
use base::RawDescriptor;
use base::SharedMemory;
use base::VolatileMemory;
use dwl::*;
use linux_input_sys::virtio_input_event;
use sync::Waitable;
use vm_control::gpu::DisplayParameters;

use crate::DisplayExternalResourceImport;
use crate::DisplayT;
use crate::EventDeviceKind;
use crate::FlipToExtraInfo;
use crate::GpuDisplayError;
use crate::GpuDisplayEvents;
use crate::GpuDisplayFramebuffer;
use crate::GpuDisplayResult;
use crate::GpuDisplaySurface;
use crate::SemaphoreTimepoint;
use crate::SurfaceType;
use crate::SysDisplayT;

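// Each surface rotates through `BUFFER_COUNT` framebuffers so a new frame can be drawn
// while the compositor may still be reading the previous one; pixels are 4 bytes wide.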
const BUFFER_COUNT: usize = 3;
const BYTES_PER_PIXEL: u32 = 4;

struct DwlContext(*mut dwl_context);
impl Drop for DwlContext {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // SAFETY:
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_context_destroy(&mut self.0);
            }
        }
    }
}

impl AsRawDescriptor for DwlContext {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        // SAFETY:
        // Safe given that the context pointer is valid.
        unsafe { dwl_context_fd(self.0) }
    }
}

struct DwlDmabuf(*mut dwl_dmabuf);

impl Drop for DwlDmabuf {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // SAFETY:
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_dmabuf_destroy(&mut self.0);
            }
        }
    }
}

struct DwlSurface(*mut dwl_surface);
impl Drop for DwlSurface {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // SAFETY:
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_surface_destroy(&mut self.0);
            }
        }
    }
}

struct WaylandSurface {
    surface: DwlSurface,
    row_size: u32,
    buffer_size: usize,
    buffer_index: Cell<usize>,
    buffer_mem: MemoryMapping,
}

impl WaylandSurface {
    fn surface(&self) -> *mut dwl_surface {
        self.surface.0
    }
}

impl GpuDisplaySurface for WaylandSurface {
    fn surface_descriptor(&self) -> u64 {
        // SAFETY:
        // Safe if the surface is valid.
        let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
        pointer as u64
    }

    fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
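        // Hand out the buffer after the one currently displayed; `flip` advances
        // `buffer_index` to it once the frame has been drawn.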
        let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
        let framebuffer = self
            .buffer_mem
            .get_slice(buffer_index * self.buffer_size, self.buffer_size)
            .ok()?;

        Some(GpuDisplayFramebuffer::new(
            framebuffer,
            self.row_size,
            BYTES_PER_PIXEL,
        ))
    }

    fn next_buffer_in_use(&self) -> bool {
        let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
        // SAFETY:
        // Safe because only a valid surface and buffer index is used.
        unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
    }

    fn close_requested(&self) -> bool {
        // SAFETY:
        // Safe because only a valid surface is used.
        unsafe { dwl_surface_close_requested(self.surface()) }
    }

    fn flip(&mut self) {
        self.buffer_index
            .set((self.buffer_index.get() + 1) % BUFFER_COUNT);

        // SAFETY:
        // Safe because only a valid surface and buffer index is used.
        unsafe {
            dwl_surface_flip(self.surface(), self.buffer_index.get());
        }
    }

    fn flip_to(
        &mut self,
        import_id: u32,
        _acquire_timepoint: Option<SemaphoreTimepoint>,
        _release_timepoint: Option<SemaphoreTimepoint>,
        _extra_info: Option<FlipToExtraInfo>,
    ) -> anyhow::Result<Waitable> {
        // SAFETY:
        // Safe because only a valid surface and import_id is used.
        unsafe { dwl_surface_flip_to(self.surface(), import_id) };
        Ok(Waitable::signaled())
    }

    fn commit(&mut self) -> GpuDisplayResult<()> {
        // SAFETY:
        // Safe because only a valid surface is used.
        unsafe {
            dwl_surface_commit(self.surface());
        }

        Ok(())
    }

    fn set_position(&mut self, x: u32, y: u32) {
        // SAFETY:
        // Safe because only a valid surface is used.
        unsafe {
            dwl_surface_set_position(self.surface(), x, y);
        }
    }
}

/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
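///
/// # Example
///
/// A minimal sketch of that flow (illustrative only; in practice the crate-level `GpuDisplay`
/// wrapper performs this on behalf of callers):
///
/// ```ignore
/// let mut display = DisplayWl::new(None)?;
/// // Register `display.as_raw_descriptor()` with a poll loop. Once it signals
/// // readable, read and translate the queued compositor events:
/// display.flush();
/// while display.pending_events() {
///     let surface_descriptor = display.next_event()?;
///     // Look up the surface for `surface_descriptor` and pass it to
///     // `handle_next_event` to obtain virtio input events.
/// }
/// ```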
pub struct DisplayWl {
    dmabufs: HashMap<u32, DwlDmabuf>,
    ctx: DwlContext,
    current_event: Option<dwl_event>,
    mt_tracking_id: u16,
}

/// Error logging callback used by the wrapped C implementation.
///
/// # Safety
///
/// The caller must pass a valid pointer to a null-terminated C string.
#[allow(clippy::unnecessary_cast)]
unsafe extern "C" fn error_callback(message: *const ::std::os::raw::c_char) {
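    // A panic must not unwind across the `extern "C"` boundary (that would be undefined
    // behavior), so catch it here and abort the process instead.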
    catch_unwind(|| {
        assert!(!message.is_null());
        // SAFETY: trivially safe
        let msg = unsafe {
            std::str::from_utf8(std::slice::from_raw_parts(
                message as *const u8,
                libc::strlen(message),
            ))
            .unwrap()
        };
        error!("{}", msg);
    })
    .unwrap_or_else(|_| abort())
}

impl DisplayWl {
    /// Opens a fresh connection to the compositor.
    pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl> {
        // SAFETY:
        // The dwl_context_new call should always be safe to call, and we check its result.
        let ctx = DwlContext(unsafe { dwl_context_new(Some(error_callback)) });
        if ctx.0.is_null() {
            return Err(GpuDisplayError::Allocate);
        }

        // The dwl_context_setup call is always safe to call given that the supplied context is
        // valid, and we check its result.
        let cstr_path = match wayland_path.map(|p| p.as_os_str().to_str()) {
            Some(Some(s)) => match CString::new(s) {
                Ok(cstr) => Some(cstr),
                Err(_) => return Err(GpuDisplayError::InvalidPath),
            },
            Some(None) => return Err(GpuDisplayError::InvalidPath),
            None => None,
        };
        // This grabs a pointer to cstr_path without moving the CString into the .map closure
        // accidentally, which would trigger a hard-to-catch use-after-free in
        // dwl_context_setup.
        let cstr_path_ptr = cstr_path
            .as_ref()
            .map(|s: &CString| CStr::as_ptr(s))
            .unwrap_or(null());
        // SAFETY: args are valid and the return value is checked.
        let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) };
        if !setup_success {
            return Err(GpuDisplayError::Connect);
        }

        Ok(DisplayWl {
            dmabufs: HashMap::new(),
            ctx,
            current_event: None,
            mt_tracking_id: 0u16,
        })
    }

    fn ctx(&self) -> *mut dwl_context {
        self.ctx.0
    }

    fn pop_event(&self) -> dwl_event {
        // SAFETY:
        // Safe because dwl_context_next_event only reads the next event out of the context's
        // circular buffer.
        unsafe {
            let mut ev = zeroed();
            dwl_context_next_event(self.ctx(), &mut ev);
            ev
        }
    }

    fn next_tracking_id(&mut self) -> i32 {
        let cur_id: i32 = self.mt_tracking_id as i32;
        self.mt_tracking_id = self.mt_tracking_id.wrapping_add(1);
        cur_id
    }

    fn current_tracking_id(&self) -> i32 {
        self.mt_tracking_id as i32
    }
}

impl DisplayT for DisplayWl {
    fn pending_events(&self) -> bool {
        // SAFETY:
        // Safe because the function just queries the values of two variables in a context.
        unsafe { dwl_context_pending_events(self.ctx()) }
    }

    fn next_event(&mut self) -> GpuDisplayResult<u64> {
        let ev = self.pop_event();
        let descriptor = ev.surface_descriptor as u64;
        self.current_event = Some(ev);
        Ok(descriptor)
    }

    fn handle_next_event(
        &mut self,
        _surface: &mut Box<dyn GpuDisplaySurface>,
    ) -> Option<GpuDisplayEvents> {
        // Should not panic since the common layer only calls this when an event occurs.
        let event = self.current_event.take().unwrap();

        match event.event_type {
            DWL_EVENT_TYPE_KEYBOARD_ENTER => None,
            DWL_EVENT_TYPE_KEYBOARD_LEAVE => None,
            DWL_EVENT_TYPE_KEYBOARD_KEY => {
                let linux_keycode = event.params[0] as u16;
                let pressed = event.params[1] == DWL_KEYBOARD_KEY_STATE_PRESSED;
                let events = vec![virtio_input_event::key(linux_keycode, pressed, false)];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Keyboard,
                })
            }
            // TODO(tutankhamen): slot is always 0, because all the input events come from a
            // mouse device, i.e. only one touch is possible at a time.
            // Full MT protocol has to be implemented and properly wired later.
            DWL_EVENT_TYPE_TOUCH_DOWN | DWL_EVENT_TYPE_TOUCH_MOTION => {
                let tracking_id = if event.event_type == DWL_EVENT_TYPE_TOUCH_DOWN {
                    self.next_tracking_id()
                } else {
                    self.current_tracking_id()
                };

                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(tracking_id),
                    virtio_input_event::multitouch_absolute_x(max(0, event.params[0])),
                    virtio_input_event::multitouch_absolute_y(max(0, event.params[1])),
                    virtio_input_event::touch(true),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            DWL_EVENT_TYPE_TOUCH_UP => {
                let events = vec![
                    virtio_input_event::multitouch_slot(0),
                    virtio_input_event::multitouch_tracking_id(-1),
                    virtio_input_event::touch(false),
                ];
                Some(GpuDisplayEvents {
                    events,
                    device_type: EventDeviceKind::Touchscreen,
                })
            }
            _ => {
                error!("unknown event type {}", event.event_type);
                None
            }
        }
    }

    fn flush(&self) {
        // SAFETY:
        // Safe given that the context pointer is valid.
        unsafe {
            dwl_context_dispatch(self.ctx());
        }
    }

    fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        surface_id: u32,
        scanout_id: Option<u32>,
        display_params: &DisplayParameters,
        surf_type: SurfaceType,
    ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
        let parent_id = parent_surface_id.unwrap_or(0);

        let (width, height) = display_params.get_virtual_display_size();
        let row_size = width * BYTES_PER_PIXEL;
        let fb_size = row_size * height;
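        // The shared memory region holds `BUFFER_COUNT` framebuffers laid out back to back,
        // rounded up to a whole number of pages.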
        let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
        let buffer_shm = SharedMemory::new("GpuDisplaySurface", buffer_size as u64)?;
        let buffer_mem = MemoryMappingBuilder::new(buffer_size)
            .from_shared_memory(&buffer_shm)
            .build()
            .unwrap();

        let dwl_surf_flags = match surf_type {
            SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA,
            SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT,
        };
        // SAFETY:
        // Safe because only a valid context, parent ID (if non-zero), and buffer FD are used.
        // The returned surface is checked for validity before being filed away.
        let surface = DwlSurface(unsafe {
            dwl_context_surface_new(
                self.ctx(),
                parent_id,
                surface_id,
                buffer_shm.as_raw_descriptor(),
                buffer_size,
                fb_size as usize,
                width,
                height,
                row_size,
                dwl_surf_flags,
            )
        });

        if surface.0.is_null() {
            return Err(GpuDisplayError::CreateSurface);
        }

        if let Some(scanout_id) = scanout_id {
            // SAFETY:
            // Safe because only a valid surface is used.
            unsafe {
                dwl_surface_set_scanout_id(surface.0, scanout_id);
            }
        }

        Ok(Box::new(WaylandSurface {
            surface,
            row_size,
            buffer_size: fb_size as usize,
            buffer_index: Cell::new(0),
            buffer_mem,
        }))
    }

    fn import_resource(
        &mut self,
        import_id: u32,
        _surface_id: u32,
        external_display_resource: DisplayExternalResourceImport,
    ) -> anyhow::Result<()> {
        // This let pattern is always true if the vulkan_display feature is disabled.
        #[allow(irrefutable_let_patterns)]
        if let DisplayExternalResourceImport::Dmabuf {
            descriptor,
            offset,
            stride,
            modifiers,
            width,
            height,
            fourcc,
        } = external_display_resource
        {
            // SAFETY:
            // Safe given that the context pointer is valid. Any other invalid parameters would be
            // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is
            // valid before filing it away.
            let dmabuf = DwlDmabuf(unsafe {
                dwl_context_dmabuf_new(
                    self.ctx(),
                    import_id,
                    descriptor.as_raw_descriptor(),
                    offset,
                    stride,
                    modifiers,
                    width,
                    height,
                    fourcc,
                )
            });

            if dmabuf.0.is_null() {
                bail!("dmabuf import failed.");
            }

            self.dmabufs.insert(import_id, dmabuf);

            Ok(())
        } else {
            bail!("gpu_display_wl only supports Dmabuf imports");
        }
    }

    fn release_import(&mut self, _surface_id: u32, import_id: u32) {
        self.dmabufs.remove(&import_id);
    }
}

impl SysDisplayT for DisplayWl {}

impl AsRawDescriptor for DisplayWl {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        // Safe given that the context pointer is valid.
        self.ctx.as_raw_descriptor()
    }
}