// Copyright 2025 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::_mm_clflush;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64::_mm_mfence;
use std::cell::RefCell;
use std::fmt;
use std::fmt::Debug;
use std::fs::File;
use std::iter::zip;
use std::mem::replace;
use std::num::NonZeroUsize;
use std::os::fd::{AsFd, AsRawFd, BorrowedFd, FromRawFd};
use std::ptr::NonNull;
#[cfg(feature = "vaapi")]
use std::rc::Rc;
use std::slice;
use std::sync::atomic::{fence, Ordering};
#[cfg(feature = "v4l2")]
use std::sync::Arc;

use crate::video_frame::{ReadMapping, VideoFrame, WriteMapping};
#[cfg(feature = "vaapi")]
use crate::DecodedFormat;
use crate::{Fourcc, FrameLayout, Resolution};

use drm_fourcc::DrmModifier;
use nix::errno::Errno;
use nix::ioctl_write_ptr;
use nix::libc;
use nix::poll::poll;
use nix::poll::PollFd;
use nix::poll::PollFlags;
use nix::poll::PollTimeout;
use nix::sys::mman::mmap;
use nix::sys::mman::munmap;
use nix::sys::mman::MapFlags;
use nix::sys::mman::ProtFlags;
use nix::unistd::dup;

#[cfg(feature = "vaapi")]
use libva::{
    Display, ExternalBufferDescriptor, MemoryType, Surface, UsageHint, VADRMPRIMESurfaceDescriptor,
    VADRMPRIMESurfaceDescriptorLayer, VADRMPRIMESurfaceDescriptorObject,
};
#[cfg(feature = "v4l2")]
use v4l2r::bindings::v4l2_plane;
#[cfg(feature = "v4l2")]
use v4l2r::device::Device;
#[cfg(feature = "v4l2")]
use v4l2r::ioctl::V4l2Buffer;
#[cfg(feature = "v4l2")]
use v4l2r::memory::DmaBufHandle;
#[cfg(feature = "v4l2")]
use v4l2r::Format;

// UNSAFE: This file uses tons of unsafe code because we are directly interacting with the kernel's
// DMA infrastructure. The core assumption is that GenericDmaVideoFrame is initialized with a
// valid DRM Prime File Descriptor, and that the FrameLayout given accurately describes the memory
// layout of the frame. We leverage Rust's lifetime system and RAII design patterns to guarantee
// that mappings will not last longer than the underlying DMA buffer.

// Defined in include/linux/dma-buf.h
const DMA_BUF_BASE: u8 = b'b';
const DMA_BUF_IOCTL_SYNC: u8 = 0;
const DMA_BUF_SYNC_READ: u64 = 1 << 0;
const DMA_BUF_SYNC_WRITE: u64 = 2 << 0;
const DMA_BUF_SYNC_START: u64 = 0 << 2;
const DMA_BUF_SYNC_END: u64 = 1 << 2;
#[repr(C)]
struct dma_buf_sync {
    flags: u64,
}
ioctl_write_ptr!(dma_buf_ioctl_sync, DMA_BUF_BASE, DMA_BUF_IOCTL_SYNC, dma_buf_sync);
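
// Retry `cb` until it returns something other than EINTR. DMA-BUF sync ioctls may be
// interrupted by signals, in which case the kernel expects the caller to simply retry.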
fn handle_eintr<T>(cb: &mut impl FnMut() -> nix::Result<T>) -> Result<T, String> {
    loop {
        match cb() {
            Ok(ret) => return Ok(ret),
            Err(errno) => {
                if errno != Errno::EINTR {
                    return Err(format!("Error executing DMA buf sync! {errno}"));
                }
            }
        }
    }
}

// Because we are limited to executing raw mmap instead of leveraging the GEM driver, all of our
// buffers will be mapped linear even if the backing frame has a modifier. So, we have to manually
// detile the buffers.
const Y_SUBTILE_WIDTH: usize = 16;
const Y_SUBTILE_HEIGHT: usize = 4;
const Y_SUBTILE_SIZE: usize = Y_SUBTILE_WIDTH * Y_SUBTILE_HEIGHT;
const Y_TILE_WIDTH_IN_SUBTILES: usize = 8;
const Y_TILE_HEIGHT_IN_SUBTILES: usize = 8;
const Y_TILE_WIDTH: usize = Y_TILE_WIDTH_IN_SUBTILES * Y_SUBTILE_WIDTH;
const Y_TILE_HEIGHT: usize = Y_TILE_HEIGHT_IN_SUBTILES * Y_SUBTILE_HEIGHT;
const Y_TILE_SIZE: usize = Y_TILE_WIDTH * Y_TILE_HEIGHT;
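// Convert one plane from Intel Y-tiling to linear, row-major order. `width` is the plane
// pitch in bytes and `height` the plane height in rows. Y-tiles are 128 bytes wide and 32
// rows tall, built from 16x4-byte subtiles that are laid out column-major within the tile.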
fn detile_y_tile(dst: &mut [u8], src: &[u8], width: usize, height: usize) {
    let tiles_per_row = width / Y_TILE_WIDTH;
    for y in 0..height {
        for x in 0..width {
            let tile_x = x / Y_TILE_WIDTH;
            let tile_y = y / Y_TILE_HEIGHT;
            let intra_tile_x = x % Y_TILE_WIDTH;
            let intra_tile_y = y % Y_TILE_HEIGHT;
            let subtile_x = intra_tile_x / Y_SUBTILE_WIDTH;
            let subtile_y = intra_tile_y / Y_SUBTILE_HEIGHT;
            let intra_subtile_x = intra_tile_x % Y_SUBTILE_WIDTH;
            let intra_subtile_y = intra_tile_y % Y_SUBTILE_HEIGHT;
            // TODO: We should batch up the writes since subtile rows are contiguous. Also consider
            // SIMD'ifying this function.
            dst[y * width + x] = src[(tile_y * tiles_per_row + tile_x) * Y_TILE_SIZE
                + (subtile_x * Y_TILE_HEIGHT_IN_SUBTILES + subtile_y) * Y_SUBTILE_SIZE
                + intra_subtile_y * Y_SUBTILE_WIDTH
                + intra_subtile_x];
        }
    }
}
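
// Illustrative sanity check, not part of the original file: construct a single Y-tile
// (128x32 bytes) by applying the same tiled-address computation in reverse, then verify
// that detile_y_tile() recovers the linear, row-major pattern.
#[cfg(test)]
mod detile_tests {
    use super::*;

    #[test]
    fn detile_single_y_tile() {
        let width = Y_TILE_WIDTH;
        let height = Y_TILE_HEIGHT;
        let mut src = vec![0u8; width * height];
        for y in 0..height {
            for x in 0..width {
                let tiled_idx = (x / Y_SUBTILE_WIDTH * Y_TILE_HEIGHT_IN_SUBTILES
                    + y / Y_SUBTILE_HEIGHT)
                    * Y_SUBTILE_SIZE
                    + (y % Y_SUBTILE_HEIGHT) * Y_SUBTILE_WIDTH
                    + (x % Y_SUBTILE_WIDTH);
                // Tag each tiled byte with the linear index it should land on. A prime
                // modulus avoids aliasing with the power-of-two tile strides.
                src[tiled_idx] = ((y * width + x) % 251) as u8;
            }
        }
        let mut dst = vec![0u8; width * height];
        detile_y_tile(&mut dst, &src, width, height);
        for (i, byte) in dst.iter().enumerate() {
            assert_eq!(*byte, (i % 251) as u8, "mismatch at linear index {i}");
        }
    }
}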

pub struct DmaMapping<'a> {
    dma_handles: Vec<BorrowedFd<'a>>,
    addrs: Vec<NonNull<libc::c_void>>,
    detiled_bufs: Vec<Vec<u8>>,
    lens: Vec<usize>,
    is_writable: bool,
}

impl<'a> DmaMapping<'a> {
    fn new(
        dma_handles: &'a Vec<File>,
        offsets: Vec<usize>,
        pitches: Vec<usize>,
        lens: Vec<usize>,
        modifier: DrmModifier,
        is_writable: bool,
    ) -> Result<Self, String> {
        if is_writable && modifier != DrmModifier::Linear {
            return Err(
                "Writable mappings are currently only supported for linear buffers!".to_string()
            );
        }
        if modifier != DrmModifier::Linear && modifier != DrmModifier::I915_y_tiled {
            return Err(
                "Only linear and Y tile buffers are currently supported for mapping!".to_string()
            );
        }

        let borrowed_dma_handles: Vec<BorrowedFd> = dma_handles.iter().map(|x| x.as_fd()).collect();

        // Wait on all memory fences to finish before attempting to map this DMA buffer.
        for fd in borrowed_dma_handles.iter() {
            let mut fence_poll_fd =
                [PollFd::new(fd.clone(), PollFlags::POLLIN | PollFlags::POLLOUT)];
            poll(&mut fence_poll_fd, PollTimeout::NONE).unwrap();
        }

        // Some architectures do not put DMA in the same coherency zone as the CPU, so we need to
        // invalidate the cache lines corresponding to this memory. The DMA infrastructure provides
        // a convenient ioctl for doing so.
        let sync_struct =
            dma_buf_sync { flags: DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE };

        for fd in borrowed_dma_handles.iter() {
            // SAFETY: This assumes fd is a valid DMA buffer.
            handle_eintr(&mut || unsafe { dma_buf_ioctl_sync(fd.as_raw_fd(), &sync_struct) })?;
        }

        // Offsets aren't guaranteed to be page aligned, so we have to map the entire FD and then
        // do pointer arithmetic to get the right buffer.
        let mut addrs: Vec<NonNull<libc::c_void>> = vec![];
        if borrowed_dma_handles.len() > 1 {
            for i in 0..offsets.len() {
                // SAFETY: This assumes that fd is a valid DMA buffer and that our lens and offsets
                // are correct.
                addrs.push(unsafe {
                    mmap(
                        None,
                        NonZeroUsize::new(lens[i] + offsets[i])
                            .ok_or("Attempted to map plane of length 0!")?,
                        if is_writable {
                            ProtFlags::PROT_READ | ProtFlags::PROT_WRITE
                        } else {
                            ProtFlags::PROT_READ
                        },
                        MapFlags::MAP_SHARED,
                        borrowed_dma_handles[i].as_fd(),
                        0,
                    )
                    .map_err(|err| format!("Error mapping plane {err}"))?
                    .add(offsets[i])
                });
            }
        } else {
            let total_size = NonZeroUsize::new(lens.iter().sum::<usize>() + offsets[0])
                .ok_or("Attempted to map VideoFrame of length 0")?;
            // SAFETY: This assumes that fd is a valid DMA buffer and that our lens and offsets are
            // correct.
            unsafe {
                let base_addr = mmap(
                    None,
                    total_size,
                    if is_writable {
                        ProtFlags::PROT_READ | ProtFlags::PROT_WRITE
                    } else {
                        ProtFlags::PROT_READ
                    },
                    MapFlags::MAP_SHARED,
                    borrowed_dma_handles[0].as_fd(),
                    0,
                )
                .map_err(|err| format!("Error mapping plane {err}"))?;
                for i in 0..offsets.len() {
                    addrs.push(base_addr.add(offsets[i]));
                }
            }
        }

        let mut detiled_bufs = vec![];
        if modifier == DrmModifier::I915_y_tiled {
            // SAFETY: This assumes mmap returned a valid memory address. Note that nix's mmap
            // bindings already check for null pointers, which we turn into Rust Err objects. So
            // this assumption will only be violated if mmap itself has a bug that returns a
            // non-NULL, but invalid pointer.
            let tiled_bufs: Vec<&[u8]> = unsafe {
                zip(addrs.iter(), lens.iter())
                    .map(|x| slice::from_raw_parts(x.0.as_ptr() as *const u8, *x.1))
                    .collect()
            };
            for i in 0..tiled_bufs.len() {
                let mut detiled_buf: Vec<u8> = vec![];
                detiled_buf.resize(tiled_bufs[i].len(), 0);
                detile_y_tile(
                    detiled_buf.as_mut_slice(),
                    tiled_bufs[i],
                    pitches[i],
                    lens[i] / pitches[i],
                );
                detiled_bufs.push(detiled_buf);
            }
        }

        Ok(DmaMapping {
            dma_handles: borrowed_dma_handles,
            addrs,
            detiled_bufs,
            lens,
            is_writable,
        })
    }
}

impl<'a> ReadMapping<'a> for DmaMapping<'a> {
    fn get(&self) -> Vec<&[u8]> {
        if !self.detiled_bufs.is_empty() {
            self.detiled_bufs.iter().map(|x| x.as_slice()).collect()
        } else {
            // SAFETY: This assumes mmap returned a valid memory address. Note that nix's mmap
            // bindings already check for null pointers, which we turn into Rust Err objects. So
            // this assumption will only be violated if mmap itself has a bug that returns a
            // non-NULL, but invalid pointer.
            unsafe {
                zip(self.addrs.iter(), self.lens.iter())
                    .map(|x| slice::from_raw_parts(x.0.as_ptr() as *const u8, *x.1))
                    .collect()
            }
        }
    }
}

impl<'a> WriteMapping<'a> for DmaMapping<'a> {
    fn get(&self) -> Vec<RefCell<&'a mut [u8]>> {
        if !self.is_writable {
            panic!("Attempted to get writable slice to read only mapping!");
        }

        // The above check protects us from undefined behavior in the event that the user attempts
        // to coerce a ReadMapping into a WriteMapping.
        // SAFETY: This assumes mmap returned a valid memory address. Note that nix's mmap bindings
        // already check for null pointers, which we turn into Rust Err objects. So this assumption
        // will only be violated if mmap itself has a bug that returns a non-NULL, but invalid
        // pointer.
        unsafe {
            zip(self.addrs.iter(), self.lens.iter())
                .map(|x| RefCell::new(slice::from_raw_parts_mut(x.0.as_ptr() as *mut u8, *x.1)))
                .collect()
        }
    }
}

impl<'a> Drop for DmaMapping<'a> {
    fn drop(&mut self) {
        // SAFETY: This should be safe because we would not instantiate a DmaMapping object if the
        // first call to dma_buf_ioctl_sync or the mmap call failed.
        unsafe {
            fence(Ordering::SeqCst);

            // Flush all cache lines back to main memory.
            let sync_struct =
                dma_buf_sync { flags: DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE };
            for fd in self.dma_handles.iter() {
                let _ = handle_eintr(&mut || dma_buf_ioctl_sync(fd.as_raw_fd(), &sync_struct));
            }

            // For some reason, DMA_BUF_IOCTL_SYNC is insufficient on Intel machines, and we have
            // to manually flush the cache lines. This is probably a violation of the DMA API spec?
            #[cfg(target_arch = "x86_64")]
            {
                // Note that fence() only guarantees that the compiler won't reorder memory
                // operations, and we need to call _mm_mfence() to guarantee the CPU won't do it.
                _mm_mfence();

                for (addr, len) in zip(self.addrs.iter(), self.lens.iter()) {
                    // TODO: We shouldn't actually have to flush every address, we should just
                    // flush the address at the beginning of each cache line. But, during testing
                    // this caused a race condition.
                    for offset in 0..*len {
                        _mm_clflush((addr.as_ptr() as *const u8).offset(offset as isize));
                    }
                }

                _mm_mfence();
            }

            fence(Ordering::SeqCst);

            // Note: iterators are lazy, so a bare `map()` here would never actually run
            // munmap. Unmap each plane eagerly with a loop.
            for (addr, len) in zip(self.addrs.iter(), self.lens.iter()) {
                munmap(*addr, *len).unwrap();
            }
        }
    }
}

#[derive(Debug)]
pub struct GenericDmaVideoFrame<T: Clone + Send + Sync + Sized + Debug + 'static> {
    pub token: T,
    dma_handles: Vec<File>,
    layout: FrameLayout,
}

// The Clone trait is implemented for GenericDmaVideoFrame (and importantly no other VideoFrame!)
// just so we can export the frame as a VA-API surface. While this looks risky, in practice we tie
// the lifetimes of the VideoFrames to the Surfaces they are exported to through the VaapiPicture
// struct.
impl<T: Clone + Send + Sync + Sized + Debug + 'static> Clone for GenericDmaVideoFrame<T> {
    fn clone(&self) -> Self {
        Self {
            token: self.token.clone(),
            // SAFETY: This is safe because we are dup'ing the fd, giving the clone'd
            // GenericDmaVideoFrame ownership of the new fd.
            dma_handles: self
                .dma_handles
                .iter()
                .map(|x| unsafe {
                    File::from_raw_fd(dup(x.as_raw_fd()).expect("Could not dup DMAbuf FD!"))
                })
                .collect(),
            layout: self.layout.clone(),
        }
    }
}

impl<T: Clone + Send + Sync + Sized + Debug + 'static> GenericDmaVideoFrame<T> {
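    // Illustrative usage sketch, not from the original file (the FrameLayout/PlaneLayout
    // field names below are assumptions based on this crate): wrap a single linear NV12
    // dmabuf and map it for reading.
    //
    //     let frame = GenericDmaVideoFrame::new(
    //         (), // token
    //         vec![dmabuf_file],
    //         FrameLayout {
    //             format: (Fourcc::from(b"NV12"), DrmModifier::Linear.into()),
    //             size: Resolution { width: 320, height: 240 },
    //             planes: vec![
    //                 PlaneLayout { buffer_index: 0, offset: 0, stride: 320 },
    //                 PlaneLayout { buffer_index: 0, offset: 320 * 240, stride: 320 },
    //             ],
    //         },
    //     )?;
    //     let mapping = frame.map()?;
    //     let planes: Vec<&[u8]> = mapping.get();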
    pub fn new(
        token: T,
        dma_handles: Vec<File>,
        layout: FrameLayout,
    ) -> Result<GenericDmaVideoFrame<T>, String> {
        let ret = GenericDmaVideoFrame { token, dma_handles, layout };
        ret.validate_frame()?;
        Ok(ret)
    }

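    // Compute the size in bytes of a single plane. For a frame backed by one FD, plane
    // sizes are inferred from the offsets (e.g. for single-FD 320x240 NV12 with the Y
    // plane at offset 0 and UV at offset 320 * 240, the Y plane is 76800 bytes and the UV
    // plane is whatever remains of the file). For multi-FD frames, each plane is simply
    // the size of its backing FD.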
    fn get_single_plane_size(&self, index: usize) -> usize {
        if index >= self.num_planes() {
            panic!("Invalid plane index {index}!");
        }

        if self.dma_handles.len() == 1 {
            if index == self.num_planes() - 1 {
                let total_size = self.dma_handles[0].metadata().unwrap().len() as usize;
                total_size - self.layout.planes[index].offset
            } else {
                self.layout.planes[index + 1].offset - self.layout.planes[index].offset
            }
        } else {
            self.dma_handles[index].metadata().unwrap().len() as usize
        }
    }

    fn get_plane_offset(&self) -> Vec<usize> {
        self.layout.planes.iter().map(|x| x.offset).collect()
    }

    fn map_helper(&self, is_writable: bool) -> Result<DmaMapping, String> {
        let lens = self.get_plane_size();
        let pitches = self.get_plane_pitch();
        let offsets = self.get_plane_offset();
        DmaMapping::new(
            &self.dma_handles,
            offsets,
            pitches,
            lens,
            DrmModifier::from(self.layout.format.1),
            is_writable,
        )
    }
}

#[cfg(feature = "vaapi")]
impl<T: Clone + Send + Sync + Sized + Debug + 'static> ExternalBufferDescriptor
    for GenericDmaVideoFrame<T>
{
    const MEMORY_TYPE: MemoryType = MemoryType::DrmPrime2;
    type DescriptorAttribute = VADRMPRIMESurfaceDescriptor;

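    // The C VADRMPRIMESurfaceDescriptor holds fixed-size arrays of 4 objects/layers and 4
    // planes per layer, so the shorter vectors below are padded out with zeroed entries.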
    fn va_surface_attribute(&mut self) -> Self::DescriptorAttribute {
        let objects = self
            .dma_handles
            .iter()
            .map(|fd| VADRMPRIMESurfaceDescriptorObject {
                fd: fd.as_raw_fd(),
                size: fd.metadata().unwrap().len() as u32,
                drm_format_modifier: self.layout.format.1,
            })
            .chain(std::iter::repeat(Default::default()))
            .take(4)
            .collect::<Vec<_>>();
        let layers = [
            VADRMPRIMESurfaceDescriptorLayer {
                drm_format: u32::from(self.layout.format.0),
                num_planes: self.num_planes() as u32,
                object_index: (0..self.dma_handles.len() as u32)
                    .chain(std::iter::repeat(0))
                    .take(4)
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap(),
                offset: self
                    .get_plane_offset()
                    .iter()
                    .map(|x| *x as u32)
                    .chain(std::iter::repeat(0))
                    .take(4)
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap(),
                pitch: self
                    .get_plane_pitch()
                    .iter()
                    .map(|x| *x as u32)
                    .chain(std::iter::repeat(0))
                    .take(4)
                    .collect::<Vec<_>>()
                    .try_into()
                    .unwrap(),
            },
            Default::default(),
            Default::default(),
            Default::default(),
        ];
        VADRMPRIMESurfaceDescriptor {
            fourcc: u32::from(self.layout.format.0),
            width: self.layout.size.width,
            height: self.layout.size.height,
            num_objects: self.dma_handles.len() as u32,
            objects: objects.try_into().unwrap(),
            num_layers: 1,
            layers,
        }
    }
}

impl<T: Clone + Send + Sync + Sized + Debug + 'static> VideoFrame for GenericDmaVideoFrame<T> {
    #[cfg(feature = "v4l2")]
    type NativeHandle = DmaBufHandle<File>;

    #[cfg(feature = "vaapi")]
    type MemDescriptor = GenericDmaVideoFrame<T>;
    #[cfg(feature = "vaapi")]
    type NativeHandle = Surface<GenericDmaVideoFrame<T>>;

    fn fourcc(&self) -> Fourcc {
        self.layout.format.0.clone()
    }

    fn resolution(&self) -> Resolution {
        self.layout.size.clone()
    }

    fn get_plane_size(&self) -> Vec<usize> {
        (0..self.num_planes()).map(|idx| self.get_single_plane_size(idx)).collect()
    }

    fn get_plane_pitch(&self) -> Vec<usize> {
        self.layout.planes.iter().map(|x| x.stride).collect()
    }

    fn map<'a>(&'a self) -> Result<Box<dyn ReadMapping<'a> + 'a>, String> {
        Ok(Box::new(self.map_helper(false)?))
    }

    fn map_mut<'a>(&'a mut self) -> Result<Box<dyn WriteMapping<'a> + 'a>, String> {
        Ok(Box::new(self.map_helper(true)?))
    }

    #[cfg(feature = "v4l2")]
    fn fill_v4l2_plane(&self, index: usize, plane: &mut v4l2_plane) {
        if self.dma_handles.len() == 1 {
            plane.m.fd = self.dma_handles[0].as_raw_fd();
            plane.length = self.dma_handles[0].metadata().unwrap().len() as u32;
        } else {
            plane.m.fd = self.dma_handles[index].as_raw_fd();
            plane.length = self.get_single_plane_size(index) as u32;
        }
        // WARNING: Importing DMA buffers with an offset is not officially supported by V4L2, but
        // several drivers (including MTK venc) will respect the data_offset field.
        plane.data_offset = self.layout.planes[index].offset as u32;
    }

    // No-op for DMA buffers since the backing FD already disambiguates them.
    #[cfg(feature = "v4l2")]
    fn process_dqbuf(&mut self, _device: Arc<Device>, _format: &Format, _buf: &V4l2Buffer) {}

    #[cfg(feature = "vaapi")]
    fn to_native_handle(&self, display: &Rc<Display>) -> Result<Self::NativeHandle, String> {
        if self.is_compressed() {
            return Err("Compressed buffer export to VA-API is not currently supported".to_string());
        }
        if !self.is_contiguous() {
            return Err(
                "Exporting non-contiguous GBM buffers to VA-API is not currently supported"
                    .to_string(),
            );
        }

        // TODO: Add more supported formats
        let rt_format = match self.decoded_format().unwrap() {
            DecodedFormat::I420 | DecodedFormat::NV12 => libva::VA_RT_FORMAT_YUV420,
            _ => return Err("Format unsupported for VA-API export".to_string()),
        };

        let mut ret = display
            .create_surfaces(
                rt_format,
                Some(u32::from(self.layout.format.0)),
                self.resolution().width,
                self.resolution().height,
                // TODO: Should we add USAGE_HINT_ENCODER support?
                Some(UsageHint::USAGE_HINT_DECODER),
                vec![self.clone()],
            )
            .map_err(|_| "Error importing GenericDmaVideoFrame to VA-API".to_string())?;

        Ok(ret.pop().unwrap())
    }
}