// Copyright 2018 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Crate for displaying simple surfaces and GPU buffers over a low-level display backend such as
//! Wayland or X.
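//!
//! A minimal usage sketch; the crate name `gpu_display` in the `use` line and the surface
//! dimensions are illustrative assumptions, and the block is not compiled as a doctest:
//!
//! ```ignore
//! use gpu_display::{GpuDisplay, SurfaceType};
//!
//! // Open a connection to the compositor; `None` uses the default Wayland socket.
//! let mut display = GpuDisplay::open_wayland(None::<&std::path::Path>)?;
//!
//! // Create a top-level scanout surface and put its current framebuffer on screen.
//! let surface_id = display.create_surface(None, 1280, 1024, SurfaceType::Scanout)?;
//! if let Some(fb) = display.framebuffer(surface_id) {
//!     // Write pixel data into `fb.as_volatile_slice()`, one row every `fb.stride()` bytes.
//! }
//! display.flip(surface_id);
//! ```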

use std::collections::BTreeMap;
use std::io::Error as IoError;
use std::path::Path;
use std::time::Duration;

use base::{AsRawDescriptor, Error as BaseError, EventType, PollToken, RawDescriptor, WaitContext};
use data_model::VolatileSlice;
use remain::sorted;
use thiserror::Error;

mod event_device;
mod gpu_display_stub;
mod gpu_display_wl;
#[cfg(feature = "x")]
mod gpu_display_x;
#[cfg(feature = "x")]
mod keycode_converter;

pub use event_device::{EventDevice, EventDeviceKind};
use linux_input_sys::virtio_input_event;

/// An error generated by `GpuDisplay`.
#[sorted]
#[derive(Error, Debug)]
pub enum GpuDisplayError {
    /// An internal allocation failed.
    #[error("internal allocation failed")]
    Allocate,
    /// A base error occurred.
    #[error("received a base error: {0}")]
    BaseError(BaseError),
    /// Connecting to the compositor failed.
    #[error("failed to connect to compositor")]
    Connect,
    /// Creating event file descriptor failed.
    #[error("failed to create event file descriptor")]
    CreateEvent,
    /// Failed to create a surface on the compositor.
    #[error("failed to create surface on the compositor")]
    CreateSurface,
    /// Failed to import a buffer to the compositor.
    #[error("failed to import a buffer to the compositor")]
    FailedImport,
    /// The import ID is invalid.
    #[error("invalid import ID")]
    InvalidImportId,
    /// The path is invalid.
    #[error("invalid path")]
    InvalidPath,
    /// The surface ID is invalid.
    #[error("invalid surface ID")]
    InvalidSurfaceId,
    /// An input/output error occurred.
    #[error("an input/output error occurred: {0}")]
    IoError(IoError),
    /// A required feature was missing.
    #[error("required feature was missing: {0}")]
    RequiredFeature(&'static str),
    /// The method is unsupported by the implementation.
    #[error("unsupported by the implementation")]
    Unsupported,
}

pub type GpuDisplayResult<T> = std::result::Result<T, GpuDisplayError>;

impl From<BaseError> for GpuDisplayError {
    fn from(e: BaseError) -> GpuDisplayError {
        GpuDisplayError::BaseError(e)
    }
}

impl From<IoError> for GpuDisplayError {
    fn from(e: IoError) -> GpuDisplayError {
        GpuDisplayError::IoError(e)
    }
}

/// A surface type
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SurfaceType {
    /// Scanout surface
    Scanout,
    /// Mouse cursor surface
    Cursor,
}

/// Poll token for display instances
#[derive(PollToken)]
pub enum DisplayPollToken {
    Display,
    EventDevice { event_device_id: u32 },
}

#[derive(Clone)]
pub struct GpuDisplayFramebuffer<'a> {
    framebuffer: VolatileSlice<'a>,
    slice: VolatileSlice<'a>,
    stride: u32,
    bytes_per_pixel: u32,
}

impl<'a> GpuDisplayFramebuffer<'a> {
    fn new(
        framebuffer: VolatileSlice<'a>,
        stride: u32,
        bytes_per_pixel: u32,
    ) -> GpuDisplayFramebuffer {
        GpuDisplayFramebuffer {
            framebuffer,
            slice: framebuffer,
            stride,
            bytes_per_pixel,
        }
    }

    fn sub_region(
        &self,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Option<GpuDisplayFramebuffer<'a>> {
        let x_byte_offset = x.checked_mul(self.bytes_per_pixel)?;
        let y_byte_offset = y.checked_mul(self.stride)?;
        let byte_offset = x_byte_offset.checked_add(y_byte_offset)?;

        let width_bytes = width.checked_mul(self.bytes_per_pixel)?;
        // The sub-slice spans (height - 1) full rows of `stride` bytes plus `width_bytes` for the
        // final row, starting at the region's top-left pixel.
        let count = height
            .checked_mul(self.stride)?
            .checked_sub(self.stride)?
            .checked_add(width_bytes)?;
        let slice = self
            .framebuffer
            .sub_slice(byte_offset as usize, count as usize)
            .unwrap();

        Some(GpuDisplayFramebuffer { slice, ..*self })
    }

    pub fn as_volatile_slice(&self) -> VolatileSlice<'a> {
        self.slice
    }

    pub fn stride(&self) -> u32 {
        self.stride
    }
}

/// Empty trait, just used as a bound for now
trait GpuDisplayImport {}

trait GpuDisplaySurface {
    /// Returns a unique ID associated with the surface. This is typically generated by the
    /// compositor or cast from a raw pointer.
    fn surface_descriptor(&self) -> u64 {
        0
    }

    /// Returns the next framebuffer, allocating if necessary.
    fn framebuffer(&mut self) -> Option<GpuDisplayFramebuffer>;

    /// Returns true if the next buffer in the swapchain is already in use.
    fn next_buffer_in_use(&self) -> bool {
        false
    }

    /// Returns true if the surface should be closed.
    fn close_requested(&self) -> bool {
        false
    }

    /// Puts the next buffer on the screen, making it the current buffer.
    fn flip(&mut self) {
        // no-op
    }

    /// Puts the specified import_id on the screen.
    fn flip_to(&mut self, _import_id: u32) {
        // no-op
    }

    /// Commits the surface to the compositor.
    fn commit(&mut self) -> GpuDisplayResult<()> {
        Ok(())
    }

    /// Sets the position of the identified subsurface relative to its parent.
    fn set_position(&mut self, _x: u32, _y: u32) {
        // no-op
    }

    /// Returns the type of the completed buffer.
    fn buffer_completion_type(&self) -> u32 {
        0
    }

    /// Draws the current buffer on the screen.
    fn draw_current_buffer(&mut self) {
        // no-op
    }

    /// Handles a compositor-specific client event.
    fn on_client_message(&mut self, _client_data: u64) {
        // no-op
    }

    /// Handles a compositor-specific shared memory completion event.
    fn on_shm_completion(&mut self, _shm_complete: u64) {
        // no-op
    }

    /// Sets the scanout ID for the surface.
    fn set_scanout_id(&mut self, _scanout_id: u32) {
        // no-op
    }
}

struct GpuDisplayEvents {
    events: Vec<virtio_input_event>,
    device_type: EventDeviceKind,
}

trait DisplayT: AsRawDescriptor {
    /// Returns true if there are events that are on the queue.
    fn pending_events(&self) -> bool {
        false
    }

    /// Sends any pending commands to the compositor.
    fn flush(&self) {
        // no-op
    }

    /// Returns the surface descriptor associated with the current event.
    fn next_event(&mut self) -> GpuDisplayResult<u64> {
        Ok(0)
    }

    /// Handles the event from the compositor, and returns a list of events.
    fn handle_next_event(
        &mut self,
        _surface: &mut Box<dyn GpuDisplaySurface>,
    ) -> Option<GpuDisplayEvents> {
        None
    }

    /// Creates a surface with the given parameters. The display backend is given a non-zero
    /// `surface_id` as a handle for subsequent operations.
    fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        surface_id: u32,
        width: u32,
        height: u32,
        surf_type: SurfaceType,
    ) -> GpuDisplayResult<Box<dyn GpuDisplaySurface>>;

    /// Imports memory into the display backend. The display backend is given a non-zero
    /// `import_id` as a handle for subsequent operations.
    fn import_memory(
        &mut self,
        _import_id: u32,
        _descriptor: &dyn AsRawDescriptor,
        _offset: u32,
        _stride: u32,
        _modifiers: u64,
        _width: u32,
        _height: u32,
        _fourcc: u32,
    ) -> GpuDisplayResult<Box<dyn GpuDisplayImport>> {
        Err(GpuDisplayError::Unsupported)
    }
}

/// A connection to the compositor and associated collection of state.
///
/// The user of `GpuDisplay` can use `AsRawDescriptor` to poll on the compositor connection's file
/// descriptor. When the connection is readable, `dispatch_events` can be called to process it.
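///
/// A sketch of a typical event loop; it assumes `display` and `surface_id` were set up as in the
/// crate-level example, and is not compiled as a doctest:
///
/// ```ignore
/// while !display.close_requested(surface_id) {
///     // Block until `display.as_raw_descriptor()` signals readable (for example with a
///     // `base::WaitContext`), then drain the pending compositor events.
///     display.dispatch_events()?;
///
///     if !display.next_buffer_in_use(surface_id) {
///         if let Some(fb) = display.framebuffer(surface_id) {
///             // Write pixel data into `fb.as_volatile_slice()`.
///         }
///         display.flip(surface_id);
///     }
/// }
/// ```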
pub struct GpuDisplay {
    next_id: u32,
    event_devices: BTreeMap<u32, EventDevice>,
    surfaces: BTreeMap<u32, Box<dyn GpuDisplaySurface>>,
    imports: BTreeMap<u32, Box<dyn GpuDisplayImport>>,
    // `inner` must be after `imports` and `surfaces` to ensure those objects are dropped before
    // the display context. The drop order for fields inside a struct is the order in which they
    // are declared [Rust RFC 1857].
    inner: Box<dyn DisplayT>,
    wait_ctx: WaitContext<DisplayPollToken>,
    is_x: bool,
}

impl GpuDisplay {
    /// Opens a connection to the X server.
    pub fn open_x<S: AsRef<str>>(display_name: Option<S>) -> GpuDisplayResult<GpuDisplay> {
        let _ = display_name;
        #[cfg(feature = "x")]
        {
            let display = match display_name {
                Some(s) => gpu_display_x::DisplayX::open_display(Some(s.as_ref()))?,
                None => gpu_display_x::DisplayX::open_display(None)?,
            };

            let wait_ctx = WaitContext::new()?;
            wait_ctx.add(&display, DisplayPollToken::Display)?;

            Ok(GpuDisplay {
                inner: Box::new(display),
                next_id: 1,
                event_devices: Default::default(),
                surfaces: Default::default(),
                imports: Default::default(),
                wait_ctx,
                is_x: true,
            })
        }
        #[cfg(not(feature = "x"))]
        Err(GpuDisplayError::Unsupported)
    }

    /// Opens a fresh connection to the compositor.
    pub fn open_wayland<P: AsRef<Path>>(wayland_path: Option<P>) -> GpuDisplayResult<GpuDisplay> {
        let display = match wayland_path {
            Some(s) => gpu_display_wl::DisplayWl::new(Some(s.as_ref()))?,
            None => gpu_display_wl::DisplayWl::new(None)?,
        };

        let wait_ctx = WaitContext::new()?;
        wait_ctx.add(&display, DisplayPollToken::Display)?;

        Ok(GpuDisplay {
            inner: Box::new(display),
            next_id: 1,
            event_devices: Default::default(),
            surfaces: Default::default(),
            imports: Default::default(),
            wait_ctx,
            is_x: false,
        })
    }

    pub fn open_stub() -> GpuDisplayResult<GpuDisplay> {
        let display = gpu_display_stub::DisplayStub::new()?;
        let wait_ctx = WaitContext::new()?;
        wait_ctx.add(&display, DisplayPollToken::Display)?;

        Ok(GpuDisplay {
            inner: Box::new(display),
            next_id: 1,
            event_devices: Default::default(),
            surfaces: Default::default(),
            imports: Default::default(),
            wait_ctx,
            is_x: false,
        })
    }

    /// Returns whether this display is an X display.
    pub fn is_x(&self) -> bool {
        self.is_x
    }

    fn handle_event_device(&mut self, event_device_id: u32) {
        if let Some(event_device) = self.event_devices.get(&event_device_id) {
            // TODO(zachr): decode the event and forward to the device.
            let _ = event_device.recv_event_encoded();
        }
    }

    fn dispatch_display_events(&mut self) -> GpuDisplayResult<()> {
        self.inner.flush();
        while self.inner.pending_events() {
            let surface_descriptor = self.inner.next_event()?;

            for surface in self.surfaces.values_mut() {
                if surface_descriptor != surface.surface_descriptor() {
                    continue;
                }

                if let Some(gpu_display_events) = self.inner.handle_next_event(surface) {
                    for event_device in self.event_devices.values_mut() {
                        if event_device.kind() != gpu_display_events.device_type {
                            continue;
                        }

                        event_device.send_report(gpu_display_events.events.iter().cloned())?;
                    }
                }
            }
        }

        Ok(())
    }

    /// Dispatches internal events that were received from the compositor since the last call to
    /// `dispatch_events`.
    pub fn dispatch_events(&mut self) -> GpuDisplayResult<()> {
        let wait_events = self.wait_ctx.wait_timeout(Duration::default())?;
        for wait_event in wait_events.iter().filter(|e| e.is_writable) {
            if let DisplayPollToken::EventDevice { event_device_id } = wait_event.token {
                if let Some(event_device) = self.event_devices.get_mut(&event_device_id) {
                    if !event_device.flush_buffered_events()? {
                        continue;
                    }
                    self.wait_ctx.modify(
                        event_device,
                        EventType::Read,
                        DisplayPollToken::EventDevice { event_device_id },
                    )?;
                }
            }
        }

        for wait_event in wait_events.iter().filter(|e| e.is_readable) {
            match wait_event.token {
                DisplayPollToken::Display => self.dispatch_display_events()?,
                DisplayPollToken::EventDevice { event_device_id } => {
                    self.handle_event_device(event_device_id)
                }
            }
        }

        Ok(())
    }

    /// Creates a surface on the compositor as either a top level window, or child of another
    /// surface, returning a handle to the new surface.
    pub fn create_surface(
        &mut self,
        parent_surface_id: Option<u32>,
        width: u32,
        height: u32,
        surf_type: SurfaceType,
    ) -> GpuDisplayResult<u32> {
        if let Some(parent_id) = parent_surface_id {
            if !self.surfaces.contains_key(&parent_id) {
                return Err(GpuDisplayError::InvalidSurfaceId);
            }
        }

        let new_surface_id = self.next_id;
        let new_surface = self.inner.create_surface(
            parent_surface_id,
            new_surface_id,
            width,
            height,
            surf_type,
        )?;

        self.next_id += 1;
        self.surfaces.insert(new_surface_id, new_surface);
        Ok(new_surface_id)
    }

    /// Releases a previously created surface identified by the given handle.
    pub fn release_surface(&mut self, surface_id: u32) {
        self.surfaces.remove(&surface_id);
    }

    /// Gets a reference to an unused framebuffer for the identified surface.
    pub fn framebuffer(&mut self, surface_id: u32) -> Option<GpuDisplayFramebuffer> {
        let surface = self.surfaces.get_mut(&surface_id)?;
        surface.framebuffer()
    }

    /// Gets a reference to a region of an unused framebuffer for the identified surface.
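    ///
    /// A sketch of a partial update; it assumes `display` and `surface_id` from the examples
    /// above, uses hypothetical coordinates, and is not compiled as a doctest:
    ///
    /// ```ignore
    /// // Redraw only a 64x64 block at (32, 32) instead of the whole framebuffer.
    /// if let Some(region) = display.framebuffer_region(surface_id, 32, 32, 64, 64) {
    ///     // `region.as_volatile_slice()` starts at the block's top-left pixel; rows remain
    ///     // `region.stride()` bytes apart, the same stride as the full framebuffer.
    /// }
    /// display.flip(surface_id);
    /// ```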
    pub fn framebuffer_region(
        &mut self,
        surface_id: u32,
        x: u32,
        y: u32,
        width: u32,
        height: u32,
    ) -> Option<GpuDisplayFramebuffer> {
        let framebuffer = self.framebuffer(surface_id)?;
        framebuffer.sub_region(x, y, width, height)
    }

    /// Returns true if the next buffer in the buffer queue for the given surface is currently in
    /// use.
    ///
    /// If the next buffer is in use, the memory returned from `framebuffer` should not be
    /// written to.
    pub fn next_buffer_in_use(&self, surface_id: u32) -> bool {
        self.surfaces
            .get(&surface_id)
            .map(|s| s.next_buffer_in_use())
            .unwrap_or(false)
    }

    /// Changes the visible contents of the identified surface to the contents of the framebuffer
    /// last returned by `framebuffer` for this surface.
    pub fn flip(&mut self, surface_id: u32) {
        if let Some(surface) = self.surfaces.get_mut(&surface_id) {
            surface.flip()
        }
    }

    /// Returns true if the identified top level surface has been told to close by the compositor,
    /// and by extension the user.
    pub fn close_requested(&self, surface_id: u32) -> bool {
        self.surfaces
            .get(&surface_id)
            .map(|s| s.close_requested())
            .unwrap_or(true)
    }

    /// Imports the given `event_device` into the display, returning an event device id on success.
    /// This device may be used to poll for input events.
    pub fn import_event_device(&mut self, event_device: EventDevice) -> GpuDisplayResult<u32> {
        let new_event_device_id = self.next_id;

        self.wait_ctx.add(
            &event_device,
            DisplayPollToken::EventDevice {
                event_device_id: new_event_device_id,
            },
        )?;

        self.event_devices.insert(new_event_device_id, event_device);
        self.next_id += 1;
        Ok(new_event_device_id)
    }

    /// Releases an event device from the display, given an `event_device_id`.
    pub fn release_event_device(&mut self, event_device_id: u32) {
        self.event_devices.remove(&event_device_id);
    }

    /// Imports memory to the compositor for use as a surface buffer and returns a handle
    /// to it.
    pub fn import_memory(
        &mut self,
        descriptor: &dyn AsRawDescriptor,
        offset: u32,
        stride: u32,
        modifiers: u64,
        width: u32,
        height: u32,
        fourcc: u32,
    ) -> GpuDisplayResult<u32> {
        let import_id = self.next_id;

        let gpu_display_memory = self.inner.import_memory(
            import_id, descriptor, offset, stride, modifiers, width, height, fourcc,
        )?;

        self.next_id += 1;
        self.imports.insert(import_id, gpu_display_memory);
        Ok(import_id)
    }

    /// Releases previously imported memory identified by the given handle.
    pub fn release_import(&mut self, import_id: u32) {
        self.imports.remove(&import_id);
    }

    /// Commits any pending state for the identified surface.
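    ///
    /// A sketch of positioning a cursor subsurface and committing its parent; it assumes
    /// `display` and a parent `scanout_id` are in scope, the cursor size and coordinates are
    /// hypothetical, and the block is not compiled as a doctest:
    ///
    /// ```ignore
    /// let cursor_id = display.create_surface(Some(scanout_id), 64, 64, SurfaceType::Cursor)?;
    /// display.set_position(cursor_id, 100, 200)?;
    /// // Per `set_position`, the new position becomes visible once the parent is committed.
    /// display.commit(scanout_id)?;
    /// ```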
    pub fn commit(&mut self, surface_id: u32) -> GpuDisplayResult<()> {
        let surface = self
            .surfaces
            .get_mut(&surface_id)
            .ok_or(GpuDisplayError::InvalidSurfaceId)?;

        surface.commit()
    }

    /// Changes the visible contents of the identified surface to that of the identified imported
    /// buffer.
    pub fn flip_to(&mut self, surface_id: u32, import_id: u32) -> GpuDisplayResult<()> {
        let surface = self
            .surfaces
            .get_mut(&surface_id)
            .ok_or(GpuDisplayError::InvalidSurfaceId)?;

        if !self.imports.contains_key(&import_id) {
            return Err(GpuDisplayError::InvalidImportId);
        }

        surface.flip_to(import_id);
        Ok(())
    }

    /// Sets the position of the identified subsurface relative to its parent.
    ///
    /// The change in position will not be visible until `commit` is called for the parent surface.
    pub fn set_position(&mut self, surface_id: u32, x: u32, y: u32) -> GpuDisplayResult<()> {
        let surface = self
            .surfaces
            .get_mut(&surface_id)
            .ok_or(GpuDisplayError::InvalidSurfaceId)?;

        surface.set_position(x, y);
        Ok(())
    }

    /// Associates the scanout id with the given surface.
    pub fn set_scanout_id(&mut self, surface_id: u32, scanout_id: u32) -> GpuDisplayResult<()> {
        let surface = self
            .surfaces
            .get_mut(&surface_id)
            .ok_or(GpuDisplayError::InvalidSurfaceId)?;

        surface.set_scanout_id(scanout_id);
        Ok(())
    }
}

impl AsRawDescriptor for GpuDisplay {
    fn as_raw_descriptor(&self) -> RawDescriptor {
        self.wait_ctx.as_raw_descriptor()
    }
}
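
// A minimal smoke-test sketch for the stub backend, which needs no running compositor; the
// surface dimensions are arbitrary and the test body is only illustrative.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn stub_surface_lifecycle() {
        let mut display = GpuDisplay::open_stub().expect("failed to open stub display");
        let surface_id = display
            .create_surface(None, 320, 240, SurfaceType::Scanout)
            .expect("failed to create surface");

        // The stub backend accepts flips and event dispatch without a compositor connection.
        display.flip(surface_id);
        display.dispatch_events().expect("dispatch_events failed");
        display.release_surface(surface_id);
    }
}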