// Copyright 2019 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Crate for displaying simple surfaces and GPU buffers over wayland.

extern crate base;
extern crate data_model;

#[path = "dwl.rs"]
mod dwl;

use dwl::*;

use crate::{
    DisplayT, EventDeviceKind, GpuDisplayError, GpuDisplayEvents, GpuDisplayFramebuffer,
    GpuDisplayImport, GpuDisplayResult, GpuDisplaySurface, SurfaceType,
};

use linux_input_sys::virtio_input_event;
use std::cell::Cell;
use std::cmp::max;
use std::ffi::{CStr, CString};
use std::mem::zeroed;
use std::path::Path;
use std::ptr::null;

use base::{
    error, round_up_to_page_size, AsRawDescriptor, MemoryMapping, MemoryMappingBuilder,
    RawDescriptor, SharedMemory,
};
use data_model::VolatileMemory;
/// Number of framebuffers allocated per surface (used round-robin by `flip`).
const BUFFER_COUNT: usize = 3;
/// Bytes per pixel; surfaces use a 32-bit-per-pixel format.
const BYTES_PER_PIXEL: u32 = 4;
37 struct DwlContext(*mut dwl_context);
38 impl Drop for DwlContext {
drop(&mut self)39     fn drop(&mut self) {
40         if !self.0.is_null() {
41             // Safe given that we checked the pointer for non-null and it should always be of the
42             // correct type.
43             unsafe {
44                 dwl_context_destroy(&mut self.0);
45             }
46         }
47     }
48 }
49 
50 impl AsRawDescriptor for DwlContext {
as_raw_descriptor(&self) -> RawDescriptor51     fn as_raw_descriptor(&self) -> RawDescriptor {
52         // Safe given that the context pointer is valid.
53         unsafe { dwl_context_fd(self.0) }
54     }
55 }
56 
57 struct DwlDmabuf(*mut dwl_dmabuf);
58 
59 impl GpuDisplayImport for DwlDmabuf {}
60 
61 impl Drop for DwlDmabuf {
drop(&mut self)62     fn drop(&mut self) {
63         if !self.0.is_null() {
64             // Safe given that we checked the pointer for non-null and it should always be of the
65             // correct type.
66             unsafe {
67                 dwl_dmabuf_destroy(&mut self.0);
68             }
69         }
70     }
71 }
72 
73 struct DwlSurface(*mut dwl_surface);
74 impl Drop for DwlSurface {
drop(&mut self)75     fn drop(&mut self) {
76         if !self.0.is_null() {
77             // Safe given that we checked the pointer for non-null and it should always be of the
78             // correct type.
79             unsafe {
80                 dwl_surface_destroy(&mut self.0);
81             }
82         }
83     }
84 }
85 
86 struct WaylandSurface {
87     surface: DwlSurface,
88     row_size: u32,
89     buffer_size: usize,
90     buffer_index: Cell<usize>,
91     buffer_mem: MemoryMapping,
92 }
93 
94 impl WaylandSurface {
surface(&self) -> *mut dwl_surface95     fn surface(&self) -> *mut dwl_surface {
96         self.surface.0
97     }
98 }
99 
100 impl GpuDisplaySurface for WaylandSurface {
surface_descriptor(&self) -> u64101     fn surface_descriptor(&self) -> u64 {
102         // Safe if the surface is valid.
103         let pointer = unsafe { dwl_surface_descriptor(self.surface.0) };
104         pointer as u64
105     }
106 
framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>107     fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer> {
108         let buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
109         let framebuffer = self
110             .buffer_mem
111             .get_slice(buffer_index * self.buffer_size, self.buffer_size)
112             .ok()?;
113 
114         Some(GpuDisplayFramebuffer::new(
115             framebuffer,
116             self.row_size,
117             BYTES_PER_PIXEL,
118         ))
119     }
120 
next_buffer_in_use(&self) -> bool121     fn next_buffer_in_use(&self) -> bool {
122         let next_buffer_index = (self.buffer_index.get() + 1) % BUFFER_COUNT;
123         // Safe because only a valid surface and buffer index is used.
124         unsafe { dwl_surface_buffer_in_use(self.surface(), next_buffer_index) }
125     }
126 
close_requested(&self) -> bool127     fn close_requested(&self) -> bool {
128         // Safe because only a valid surface is used.
129         unsafe { dwl_surface_close_requested(self.surface()) }
130     }
131 
flip(&mut self)132     fn flip(&mut self) {
133         self.buffer_index
134             .set((self.buffer_index.get() + 1) % BUFFER_COUNT);
135 
136         // Safe because only a valid surface and buffer index is used.
137         unsafe {
138             dwl_surface_flip(self.surface(), self.buffer_index.get());
139         }
140     }
141 
flip_to(&mut self, import_id: u32)142     fn flip_to(&mut self, import_id: u32) {
143         // Safe because only a valid surface and import_id is used.
144         unsafe { dwl_surface_flip_to(self.surface(), import_id) }
145     }
146 
commit(&mut self) -> GpuDisplayResult<()>147     fn commit(&mut self) -> GpuDisplayResult<()> {
148         // Safe because only a valid surface is used.
149         unsafe {
150             dwl_surface_commit(self.surface());
151         }
152 
153         Ok(())
154     }
155 
set_position(&mut self, x: u32, y: u32)156     fn set_position(&mut self, x: u32, y: u32) {
157         // Safe because only a valid surface is used.
158         unsafe {
159             dwl_surface_set_position(self.surface(), x, y);
160         }
161     }
162 
set_scanout_id(&mut self, scanout_id: u32)163     fn set_scanout_id(&mut self, scanout_id: u32) {
164         // Safe because only a valid surface is used.
165         unsafe {
166             dwl_surface_set_scanout_id(self.surface(), scanout_id);
167         }
168     }
169 }
170 
171 /// A connection to the compositor and associated collection of state.
172 ///
173 /// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
174 /// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
175 
176 pub struct DisplayWl {
177     ctx: DwlContext,
178     current_event: Option<dwl_event>,
179     mt_tracking_id: u16,
180 }
181 
182 impl DisplayWl {
183     /// Opens a fresh connection to the compositor.
new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl>184     pub fn new(wayland_path: Option<&Path>) -> GpuDisplayResult<DisplayWl> {
185         // The dwl_context_new call should always be safe to call, and we check its result.
186         let ctx = DwlContext(unsafe { dwl_context_new() });
187         if ctx.0.is_null() {
188             return Err(GpuDisplayError::Allocate);
189         }
190 
191         // The dwl_context_setup call is always safe to call given that the supplied context is
192         // valid. and we check its result.
193         let cstr_path = match wayland_path.map(|p| p.as_os_str().to_str()) {
194             Some(Some(s)) => match CString::new(s) {
195                 Ok(cstr) => Some(cstr),
196                 Err(_) => return Err(GpuDisplayError::InvalidPath),
197             },
198             Some(None) => return Err(GpuDisplayError::InvalidPath),
199             None => None,
200         };
201         // This grabs a pointer to cstr_path without moving the CString into the .map closure
202         // accidentally, which triggeres a really hard to catch use after free in
203         // dwl_context_setup.
204         let cstr_path_ptr = cstr_path
205             .as_ref()
206             .map(|s: &CString| CStr::as_ptr(s))
207             .unwrap_or(null());
208         let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path_ptr) };
209         if !setup_success {
210             return Err(GpuDisplayError::Connect);
211         }
212 
213         Ok(DisplayWl {
214             ctx,
215             current_event: None,
216             mt_tracking_id: 0u16,
217         })
218     }
219 
ctx(&self) -> *mut dwl_context220     fn ctx(&self) -> *mut dwl_context {
221         self.ctx.0
222     }
223 
pop_event(&self) -> dwl_event224     fn pop_event(&self) -> dwl_event {
225         // Safe because dwl_next_events from a context's circular buffer.
226         unsafe {
227             let mut ev = zeroed();
228             dwl_context_next_event(self.ctx(), &mut ev);
229             ev
230         }
231     }
232 
next_tracking_id(&mut self) -> i32233     fn next_tracking_id(&mut self) -> i32 {
234         let cur_id: i32 = self.mt_tracking_id as i32;
235         self.mt_tracking_id = self.mt_tracking_id.wrapping_add(1);
236         cur_id
237     }
238 
current_tracking_id(&self) -> i32239     fn current_tracking_id(&self) -> i32 {
240         self.mt_tracking_id as i32
241     }
242 }
243 
244 impl DisplayT for DisplayWl {
pending_events(&self) -> bool245     fn pending_events(&self) -> bool {
246         // Safe because the function just queries the values of two variables in a context.
247         unsafe { dwl_context_pending_events(self.ctx()) }
248     }
249 
next_event(&mut self) -> GpuDisplayResult<u64>250     fn next_event(&mut self) -> GpuDisplayResult<u64> {
251         let ev = self.pop_event();
252         let descriptor = ev.surface_descriptor as u64;
253         self.current_event = Some(ev);
254         Ok(descriptor)
255     }
256 
handle_next_event( &mut self, _surface: &mut Box<dyn GpuDisplaySurface>, ) -> Option<GpuDisplayEvents>257     fn handle_next_event(
258         &mut self,
259         _surface: &mut Box<dyn GpuDisplaySurface>,
260     ) -> Option<GpuDisplayEvents> {
261         // Should not panic since the common layer only calls this when an event occurs.
262         let event = self.current_event.take().unwrap();
263 
264         match event.event_type {
265             DWL_EVENT_TYPE_KEYBOARD_ENTER => None,
266             DWL_EVENT_TYPE_KEYBOARD_LEAVE => None,
267             DWL_EVENT_TYPE_KEYBOARD_KEY => {
268                 let linux_keycode = event.params[0] as u16;
269                 let pressed = event.params[1] == DWL_KEYBOARD_KEY_STATE_PRESSED;
270                 let events = vec![virtio_input_event::key(linux_keycode, pressed)];
271                 Some(GpuDisplayEvents {
272                     events,
273                     device_type: EventDeviceKind::Keyboard,
274                 })
275             }
276             // TODO(tutankhamen): slot is always 0, because all the input
277             // events come from mouse device, i.e. only one touch is possible at a time.
278             // Full MT protocol has to be implemented and properly wired later.
279             DWL_EVENT_TYPE_TOUCH_DOWN | DWL_EVENT_TYPE_TOUCH_MOTION => {
280                 let tracking_id = if event.event_type == DWL_EVENT_TYPE_TOUCH_DOWN {
281                     self.next_tracking_id()
282                 } else {
283                     self.current_tracking_id()
284                 };
285 
286                 let events = vec![
287                     virtio_input_event::multitouch_slot(0),
288                     virtio_input_event::multitouch_tracking_id(tracking_id),
289                     virtio_input_event::multitouch_absolute_x(max(0, event.params[0])),
290                     virtio_input_event::multitouch_absolute_y(max(0, event.params[1])),
291                 ];
292                 Some(GpuDisplayEvents {
293                     events,
294                     device_type: EventDeviceKind::Touchscreen,
295                 })
296             }
297             DWL_EVENT_TYPE_TOUCH_UP => {
298                 let events = vec![
299                     virtio_input_event::multitouch_slot(0),
300                     virtio_input_event::multitouch_tracking_id(-1),
301                 ];
302                 Some(GpuDisplayEvents {
303                     events,
304                     device_type: EventDeviceKind::Touchscreen,
305                 })
306             }
307             _ => {
308                 error!("unknown event type {}", event.event_type);
309                 None
310             }
311         }
312     }
313 
flush(&self)314     fn flush(&self) {
315         // Safe given that the context pointer is valid.
316         unsafe {
317             dwl_context_dispatch(self.ctx());
318         }
319     }
320 
create_surface( &mut self, parent_surface_id: Option<u32>, surface_id: u32, width: u32, height: u32, surf_type: SurfaceType, ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>>321     fn create_surface(
322         &mut self,
323         parent_surface_id: Option<u32>,
324         surface_id: u32,
325         width: u32,
326         height: u32,
327         surf_type: SurfaceType,
328     ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>> {
329         let parent_id = parent_surface_id.unwrap_or(0);
330 
331         let row_size = width * BYTES_PER_PIXEL;
332         let fb_size = row_size * height;
333         let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
334         let buffer_shm = SharedMemory::named("GpuDisplaySurface", buffer_size as u64)?;
335         let buffer_mem = MemoryMappingBuilder::new(buffer_size)
336             .from_shared_memory(&buffer_shm)
337             .build()
338             .unwrap();
339 
340         let dwl_surf_flags = match surf_type {
341             SurfaceType::Cursor => DWL_SURFACE_FLAG_HAS_ALPHA,
342             SurfaceType::Scanout => DWL_SURFACE_FLAG_RECEIVE_INPUT,
343         };
344         // Safe because only a valid context, parent ID (if not non-zero), and buffer FD are used.
345         // The returned surface is checked for validity before being filed away.
346         let surface = DwlSurface(unsafe {
347             dwl_context_surface_new(
348                 self.ctx(),
349                 parent_id,
350                 surface_id,
351                 buffer_shm.as_raw_descriptor(),
352                 buffer_size,
353                 fb_size as usize,
354                 width,
355                 height,
356                 row_size,
357                 dwl_surf_flags,
358             )
359         });
360 
361         if surface.0.is_null() {
362             return Err(GpuDisplayError::CreateSurface);
363         }
364 
365         Ok(Box::new(WaylandSurface {
366             surface,
367             row_size,
368             buffer_size: fb_size as usize,
369             buffer_index: Cell::new(0),
370             buffer_mem,
371         }))
372     }
373 
import_memory( &mut self, import_id: u32, descriptor: &dyn AsRawDescriptor, offset: u32, stride: u32, modifiers: u64, width: u32, height: u32, fourcc: u32, ) -> GpuDisplayResult<Box<dyn GpuDisplayImport>>374     fn import_memory(
375         &mut self,
376         import_id: u32,
377         descriptor: &dyn AsRawDescriptor,
378         offset: u32,
379         stride: u32,
380         modifiers: u64,
381         width: u32,
382         height: u32,
383         fourcc: u32,
384     ) -> GpuDisplayResult<Box<dyn GpuDisplayImport>> {
385         // Safe given that the context pointer is valid. Any other invalid parameters would be
386         // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
387         // before filing it away.
388         let dmabuf = DwlDmabuf(unsafe {
389             dwl_context_dmabuf_new(
390                 self.ctx(),
391                 import_id,
392                 descriptor.as_raw_descriptor(),
393                 offset,
394                 stride,
395                 modifiers,
396                 width,
397                 height,
398                 fourcc,
399             )
400         });
401 
402         if dmabuf.0.is_null() {
403             return Err(GpuDisplayError::FailedImport);
404         }
405 
406         Ok(Box::new(dmabuf))
407     }
408 }
409 
410 impl AsRawDescriptor for DisplayWl {
as_raw_descriptor(&self) -> RawDescriptor411     fn as_raw_descriptor(&self) -> RawDescriptor {
412         // Safe given that the context pointer is valid.
413         unsafe { dwl_context_fd(self.ctx.0) }
414     }
415 }
416