// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Crate for displaying simple surfaces and GPU buffers over wayland.

mod dwl;

use std::cell::Cell;
use std::collections::HashMap;
use std::ffi::{CStr, CString};
use std::fmt::{self, Display};
use std::os::unix::io::{AsRawFd, RawFd};
use std::path::Path;
use std::ptr::null_mut;

use data_model::{VolatileMemory, VolatileSlice};
use sys_util::{round_up_to_page_size, Error as SysError, MemoryMapping, SharedMemory};

use crate::dwl::*;

// Double buffering: each surface gets two framebuffers to alternate between.
const BUFFER_COUNT: usize = 2;
// Framebuffers use a 32 bits-per-pixel format, so four bytes per pixel.
const BYTES_PER_PIXEL: u32 = 4;

/// An error generated by `GpuDisplay`.
#[derive(Debug)]
pub enum GpuDisplayError {
    /// An internal allocation failed.
    Allocate,
    /// Connecting to the compositor failed.
    Connect,
    /// Creating shared memory failed.
    CreateShm(SysError),
    /// Setting the size of shared memory failed.
    SetSize(SysError),
    /// Failed to create a surface on the compositor.
    CreateSurface,
    /// Failed to import a buffer to the compositor.
    FailedImport,
    /// The surface ID is invalid.
    InvalidSurfaceId,
    /// The path is invalid.
    InvalidPath,
}

impl Display for GpuDisplayError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::GpuDisplayError::*;

        match self {
            Allocate => write!(f, "internal allocation failed"),
            Connect => write!(f, "failed to connect to compositor"),
            CreateShm(e) => write!(f, "failed to create shared memory: {}", e),
            SetSize(e) => write!(f, "failed to set size of shared memory: {}", e),
            CreateSurface => write!(f, "failed to create surface on the compositor"),
            FailedImport => write!(f, "failed to import a buffer to the compositor"),
            InvalidSurfaceId => write!(f, "invalid surface ID"),
            InvalidPath => write!(f, "invalid path"),
        }
    }
}

struct DwlContext(*mut dwl_context);
impl Drop for DwlContext {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_context_destroy(&mut self.0);
            }
        }
    }
}

struct DwlDmabuf(*mut dwl_dmabuf);
impl Drop for DwlDmabuf {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_dmabuf_destroy(&mut self.0);
            }
        }
    }
}

struct DwlSurface(*mut dwl_surface);
impl Drop for DwlSurface {
    fn drop(&mut self) {
        if !self.0.is_null() {
            // Safe given that we checked the pointer for non-null and it should always be of the
            // correct type.
            unsafe {
                dwl_surface_destroy(&mut self.0);
            }
        }
    }
}

struct GpuDisplaySurface {
    surface: DwlSurface,
    buffer_size: usize,
    buffer_index: Cell<usize>,
    buffer_mem: MemoryMapping,
}

impl GpuDisplaySurface {
    fn surface(&self) -> *mut dwl_surface {
        self.surface.0
    }
}

/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawFd` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
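///
/// # Example
///
/// A minimal event-loop sketch; the socket path is illustrative and `wait_readable` stands in
/// for whatever poll mechanism the caller already has (it is not part of this crate):
///
/// ```no_run
/// # use std::os::unix::io::{AsRawFd, RawFd};
/// # fn wait_readable(_fd: RawFd) {}
/// let mut display = gpu_display::GpuDisplay::new("/run/user/1000/wayland-0")
///     .expect("failed to connect to compositor");
/// loop {
///     wait_readable(display.as_raw_fd());
///     display.dispatch_events();
/// }
/// ```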
pub struct GpuDisplay {
    ctx: DwlContext,
    dmabufs: HashMap<u32, DwlDmabuf>,
    dmabuf_next_id: u32,
    surfaces: HashMap<u32, GpuDisplaySurface>,
    surface_next_id: u32,
}

impl GpuDisplay {
    /// Opens a fresh connection to the compositor.
    pub fn new<P: AsRef<Path>>(wayland_path: P) -> Result<GpuDisplay, GpuDisplayError> {
        // The dwl_context_new call should always be safe to call, and we check its result.
        let ctx = DwlContext(unsafe { dwl_context_new() });
        if ctx.0.is_null() {
            return Err(GpuDisplayError::Allocate);
        }

        // The dwl_context_setup call is always safe to call given that the supplied context is
        // valid, and we check its result.
        let cstr_path = match wayland_path.as_ref().as_os_str().to_str() {
            Some(str) => match CString::new(str) {
                Ok(cstr) => cstr,
                Err(_) => return Err(GpuDisplayError::InvalidPath),
            },
            None => return Err(GpuDisplayError::InvalidPath),
        };
        let setup_success = unsafe { dwl_context_setup(ctx.0, cstr_path.as_ptr()) };
        if !setup_success {
            return Err(GpuDisplayError::Connect);
        }

        Ok(GpuDisplay {
            ctx,
            dmabufs: Default::default(),
            dmabuf_next_id: 0,
            surfaces: Default::default(),
            surface_next_id: 0,
        })
    }

    fn ctx(&self) -> *mut dwl_context {
        self.ctx.0
    }

    fn get_surface(&self, surface_id: u32) -> Option<&GpuDisplaySurface> {
        self.surfaces.get(&surface_id)
    }

    /// Imports a dmabuf to the compositor for use as a surface buffer and returns a handle to it.
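    ///
    /// # Example
    ///
    /// A sketch of importing a buffer and displaying it. The fd, stride, and modifier values
    /// must come from the caller's GPU driver (`get_dmabuf_from_gpu` is a hypothetical stand-in),
    /// and `0x34325258` is the DRM fourcc for XRGB8888 ('XR24'):
    ///
    /// ```no_run
    /// # use std::os::unix::io::RawFd;
    /// # fn get_dmabuf_from_gpu() -> (RawFd, u32, u32, u64) { (-1, 0, 2560, 0) }
    /// # let mut display = gpu_display::GpuDisplay::new("/run/user/1000/wayland-0").unwrap();
    /// # let surface_id = display.create_surface(None, 640, 480).unwrap();
    /// let (fd, offset, stride, modifiers) = get_dmabuf_from_gpu();
    /// let import_id = display
    ///     .import_dmabuf(fd, offset, stride, modifiers, 640, 480, 0x34325258)
    ///     .expect("failed to import dmabuf");
    /// display.flip_to(surface_id, import_id);
    /// ```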
    pub fn import_dmabuf(
        &mut self,
        fd: RawFd,
        offset: u32,
        stride: u32,
        modifiers: u64,
        width: u32,
        height: u32,
        fourcc: u32,
    ) -> Result<u32, GpuDisplayError> {
        // Safe given that the context pointer is valid. Any other invalid parameters would be
        // rejected by dwl_context_dmabuf_new safely. We check that the resulting dmabuf is valid
        // before filing it away.
        let dmabuf = DwlDmabuf(unsafe {
            dwl_context_dmabuf_new(
                self.ctx(),
                fd,
                offset,
                stride,
                modifiers,
                width,
                height,
                fourcc,
            )
        });
        if dmabuf.0.is_null() {
            return Err(GpuDisplayError::FailedImport);
        }

        let next_id = self.dmabuf_next_id;
        self.dmabufs.insert(next_id, dmabuf);
        self.dmabuf_next_id += 1;
        Ok(next_id)
    }

    /// Releases a previously imported dmabuf identified by the given handle.
    pub fn release_import(&mut self, import_id: u32) {
        self.dmabufs.remove(&import_id);
    }

    /// Dispatches internal events that were received from the compositor since the last call to
    /// `dispatch_events`.
    pub fn dispatch_events(&mut self) {
        // Safe given that the context pointer is valid.
        unsafe {
            dwl_context_dispatch(self.ctx());
        }
    }

    /// Creates a surface on the compositor as either a top level window, or child of another
    /// surface, returning a handle to the new surface.
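    ///
    /// # Example
    ///
    /// A sketch of creating a top level window and a child surface nested within it (the
    /// dimensions are arbitrary):
    ///
    /// ```no_run
    /// # let mut display = gpu_display::GpuDisplay::new("/run/user/1000/wayland-0").unwrap();
    /// let parent_id = display.create_surface(None, 640, 480).unwrap();
    /// let child_id = display.create_surface(Some(parent_id), 320, 240).unwrap();
    /// ```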
    pub fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        width: u32,
        height: u32,
    ) -> Result<u32, GpuDisplayError> {
        let parent_ptr = match parent_surface_id {
            Some(id) => match self.get_surface(id).map(|p| p.surface()) {
                Some(ptr) => ptr,
                None => return Err(GpuDisplayError::InvalidSurfaceId),
            },
            None => null_mut(),
        };
        let row_size = width * BYTES_PER_PIXEL;
        let fb_size = row_size * height;
        let buffer_size = round_up_to_page_size(fb_size as usize * BUFFER_COUNT);
        let mut buffer_shm = SharedMemory::new(Some(
            CStr::from_bytes_with_nul(b"GpuDisplaySurface\0").unwrap(),
        ))
        .map_err(GpuDisplayError::CreateShm)?;
        buffer_shm
            .set_size(buffer_size as u64)
            .map_err(GpuDisplayError::SetSize)?;
        let buffer_mem = MemoryMapping::from_fd(&buffer_shm, buffer_size).unwrap();

        // Safe because only a valid context, parent pointer (if not None), and buffer FD are used.
        // The returned surface is checked for validity before being filed away.
        let surface = DwlSurface(unsafe {
            dwl_context_surface_new(
                self.ctx(),
                parent_ptr,
                buffer_shm.as_raw_fd(),
                buffer_size,
                fb_size as usize,
                width,
                height,
                row_size,
            )
        });

        if surface.0.is_null() {
            return Err(GpuDisplayError::CreateSurface);
        }

        let next_id = self.surface_next_id;
        self.surfaces.insert(
            next_id,
            GpuDisplaySurface {
                surface,
                // The size of a single framebuffer, not of the whole double buffered shm region.
                buffer_size: fb_size as usize,
                buffer_index: Cell::new(0),
                buffer_mem,
            },
        );

        self.surface_next_id += 1;
        Ok(next_id)
    }

    /// Releases a previously created surface identified by the given handle.
    pub fn release_surface(&mut self, surface_id: u32) {
        self.surfaces.remove(&surface_id);
    }

    /// Gets a reference to an unused framebuffer for the identified surface.
    pub fn framebuffer_memory(&self, surface_id: u32) -> Option<VolatileSlice> {
        let surface = self.get_surface(surface_id)?;
        // The returned slice is the buffer after the one currently on display, which is the one
        // that the next call to `flip` will display.
        let buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
        surface
            .buffer_mem
            .get_slice(
                (buffer_index * surface.buffer_size) as u64,
                surface.buffer_size as u64,
            )
            .ok()
    }

    /// Commits any pending state for the identified surface.
    pub fn commit(&self, surface_id: u32) {
        match self.get_surface(surface_id) {
            Some(surface) => {
                // Safe because only a valid surface is used.
                unsafe {
                    dwl_surface_commit(surface.surface());
                }
            }
            None => debug_assert!(false, "invalid surface_id {}", surface_id),
        }
    }

    /// Returns true if the next buffer in the buffer queue for the given surface is currently in
    /// use.
    ///
    /// If the next buffer is in use, the memory returned from `framebuffer_memory` should not be
    /// written to.
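    ///
    /// # Example
    ///
    /// A sketch of drawing one frame with the double buffering scheme used by this crate;
    /// `draw_frame` is a hypothetical helper that writes pixel data into the slice:
    ///
    /// ```no_run
    /// # use data_model::VolatileSlice;
    /// # fn draw_frame(_fb: VolatileSlice) {}
    /// # let mut display = gpu_display::GpuDisplay::new("/run/user/1000/wayland-0").unwrap();
    /// # let surface_id = display.create_surface(None, 640, 480).unwrap();
    /// if !display.next_buffer_in_use(surface_id) {
    ///     let fb = display
    ///         .framebuffer_memory(surface_id)
    ///         .expect("failed to get framebuffer");
    ///     draw_frame(fb);
    ///     display.flip(surface_id);
    /// }
    /// ```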
    pub fn next_buffer_in_use(&self, surface_id: u32) -> bool {
        match self.get_surface(surface_id) {
            Some(surface) => {
                let next_buffer_index = (surface.buffer_index.get() + 1) % BUFFER_COUNT;
                // Safe because only a valid surface and buffer index is used.
                unsafe { dwl_surface_buffer_in_use(surface.surface(), next_buffer_index) }
            }
            None => {
                debug_assert!(false, "invalid surface_id {}", surface_id);
                false
            }
        }
    }

    /// Changes the visible contents of the identified surface to the contents of the framebuffer
    /// last returned by `framebuffer_memory` for this surface.
    pub fn flip(&self, surface_id: u32) {
        match self.get_surface(surface_id) {
            Some(surface) => {
                surface
                    .buffer_index
                    .set((surface.buffer_index.get() + 1) % BUFFER_COUNT);
                // Safe because only a valid surface and buffer index is used.
                unsafe {
                    dwl_surface_flip(surface.surface(), surface.buffer_index.get());
                }
            }
            None => debug_assert!(false, "invalid surface_id {}", surface_id),
        }
    }

    /// Changes the visible contents of the identified surface to that of the identified imported
    /// buffer.
    pub fn flip_to(&self, surface_id: u32, import_id: u32) {
        match self.get_surface(surface_id) {
            Some(surface) => {
                match self.dmabufs.get(&import_id) {
                    // Safe because only a valid surface and dmabuf is used.
                    Some(dmabuf) => unsafe { dwl_surface_flip_to(surface.surface(), dmabuf.0) },
                    None => debug_assert!(false, "invalid import_id {}", import_id),
                }
            }
            None => debug_assert!(false, "invalid surface_id {}", surface_id),
        }
    }

    /// Returns true if the identified top level surface has been told to close by the compositor,
    /// and by extension the user.
    pub fn close_requested(&self, surface_id: u32) -> bool {
        match self.get_surface(surface_id) {
            // Safe because only a valid surface is used.
            Some(surface) => unsafe { dwl_surface_close_requested(surface.surface()) },
            None => false,
        }
    }

    /// Sets the position of the identified subsurface relative to its parent.
    ///
    /// The change in position will not be visible until `commit` is called for the parent surface.
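    ///
    /// # Example
    ///
    /// A sketch of moving a child surface and making the move visible; the ids are assumed to
    /// come from earlier `create_surface` calls:
    ///
    /// ```no_run
    /// # let mut display = gpu_display::GpuDisplay::new("/run/user/1000/wayland-0").unwrap();
    /// # let parent_id = display.create_surface(None, 640, 480).unwrap();
    /// # let child_id = display.create_surface(Some(parent_id), 320, 240).unwrap();
    /// display.set_position(child_id, 16, 16);
    /// display.commit(parent_id);
    /// ```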
    pub fn set_position(&self, surface_id: u32, x: u32, y: u32) {
        match self.get_surface(surface_id) {
            Some(surface) => {
                // Safe because only a valid surface is used.
                unsafe {
                    dwl_surface_set_position(surface.surface(), x, y);
                }
            }
            None => debug_assert!(false, "invalid surface_id {}", surface_id),
        }
    }
}

impl Drop for GpuDisplay {
    fn drop(&mut self) {
        // Safe given that the context pointer is valid.
        unsafe { dwl_context_destroy(&mut self.ctx.0) }
    }
}

impl AsRawFd for GpuDisplay {
    fn as_raw_fd(&self) -> RawFd {
        // Safe given that the context pointer is valid.
        unsafe { dwl_context_fd(self.ctx.0) }
    }
}