// Copyright 2024 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Support for virtio-media devices in crosvm.
//!
//! This module provides implementations of the virtio-media traits required to make virtio-media
//! devices operate under crosvm. Sub-modules then integrate these devices with crosvm.

#[cfg(feature = "video-decoder")]
pub mod decoder_adapter;

use std::collections::BTreeMap;
use std::os::fd::AsRawFd;
use std::os::fd::BorrowedFd;
use std::path::Path;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;

use anyhow::Context;
use base::error;
use base::Descriptor;
use base::Event;
use base::EventToken;
use base::EventType;
use base::MappedRegion;
use base::MemoryMappingArena;
use base::Protection;
use base::WaitContext;
use base::WorkerThread;
use resources::address_allocator::AddressAllocator;
use resources::AddressRange;
use resources::Alloc;
use sync::Mutex;
use virtio_media::io::WriteToDescriptorChain;
use virtio_media::poll::SessionPoller;
use virtio_media::protocol::SgEntry;
use virtio_media::protocol::V4l2Event;
use virtio_media::protocol::VirtioMediaDeviceConfig;
use virtio_media::GuestMemoryRange;
use virtio_media::VirtioMediaDevice;
use virtio_media::VirtioMediaDeviceRunner;
use virtio_media::VirtioMediaEventQueue;
use virtio_media::VirtioMediaGuestMemoryMapper;
use virtio_media::VirtioMediaHostMemoryMapper;
use vm_control::VmMemorySource;
use vm_memory::GuestAddress;
use vm_memory::GuestMemory;

use crate::virtio::copy_config;
use crate::virtio::device_constants::media::QUEUE_SIZES;
#[cfg(feature = "video-decoder")]
use crate::virtio::device_constants::video::VideoBackendType;
use crate::virtio::DeviceType;
use crate::virtio::Interrupt;
use crate::virtio::Queue;
use crate::virtio::Reader;
use crate::virtio::SharedMemoryMapper;
use crate::virtio::SharedMemoryRegion;
use crate::virtio::VirtioDevice;
use crate::virtio::Writer;

/// Structure supporting the implementation of `VirtioMediaEventQueue` for sending events to the
/// driver.
struct EventQueue(Queue);

impl VirtioMediaEventQueue for EventQueue {
    /// Wait until an event descriptor becomes available and send `event` to the guest.
    fn send_event(&mut self, event: V4l2Event) {
        let mut desc;

        loop {
            match self.0.pop() {
                Some(d) => {
                    desc = d;
                    break;
                }
                None => {
                    if let Err(e) = self.0.event().wait() {
                        error!("could not obtain a descriptor to send event to: {:#}", e);
                        return;
                    }
                }
            }
        }

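        // Each event variant carries a different fixed-size payload, so the inner struct of the
        // matched variant is written rather than the enum itself.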
        if let Err(e) = match event {
            V4l2Event::Error(event) => WriteToDescriptorChain::write_obj(&mut desc.writer, event),
            V4l2Event::DequeueBuffer(event) => {
                WriteToDescriptorChain::write_obj(&mut desc.writer, event)
            }
            V4l2Event::Event(event) => WriteToDescriptorChain::write_obj(&mut desc.writer, event),
        } {
            error!("failed to write event: {}", e);
        }

        let written = desc.writer.bytes_written() as u32;
        self.0.add_used(desc, written);
        self.0.trigger_interrupt();
    }
}

/// A `SharedMemoryMapper` behind an `Arc`, allowing it to be shared.
///
/// This is required by the fact that devices can be activated several times, but the mapper is
/// only provided once. This might be a defect of the `VirtioDevice` interface.
#[derive(Clone)]
struct ArcedMemoryMapper(Arc<Mutex<Box<dyn SharedMemoryMapper>>>);

impl From<Box<dyn SharedMemoryMapper>> for ArcedMemoryMapper {
    fn from(mapper: Box<dyn SharedMemoryMapper>) -> Self {
        Self(Arc::new(Mutex::new(mapper)))
    }
}

impl SharedMemoryMapper for ArcedMemoryMapper {
    fn add_mapping(
        &mut self,
        source: VmMemorySource,
        offset: u64,
        prot: Protection,
        cache: hypervisor::MemCacheType,
    ) -> anyhow::Result<()> {
        self.0.lock().add_mapping(source, offset, prot, cache)
    }

    fn remove_mapping(&mut self, offset: u64) -> anyhow::Result<()> {
        self.0.lock().remove_mapping(offset)
    }

    fn as_raw_descriptor(&self) -> Option<base::RawDescriptor> {
        self.0.lock().as_raw_descriptor()
    }
}

/// Provides the ability to map host memory into the guest physical address space. Used to
/// implement `VirtioMediaHostMemoryMapper`.
struct HostMemoryMapper<M: SharedMemoryMapper> {
    /// Mapper.
    shm_mapper: M,
    /// Address allocator for the mapper.
    allocator: AddressAllocator,
}

impl<M: SharedMemoryMapper> VirtioMediaHostMemoryMapper for HostMemoryMapper<M> {
    fn add_mapping(
        &mut self,
        buffer: BorrowedFd,
        length: u64,
        offset: u64,
        rw: bool,
    ) -> Result<u64, i32> {
        // TODO: technically `offset` can be used twice if a buffer is deleted and another buffer
        // takes its place...
        let shm_offset = self
            .allocator
            .allocate(length, Alloc::FileBacked(offset), "".into())
            .map_err(|_| libc::ENOMEM)?;

        match self.shm_mapper.add_mapping(
            VmMemorySource::Descriptor {
                descriptor: buffer.try_clone_to_owned().map_err(|_| libc::EIO)?.into(),
                offset: 0,
                size: length,
            },
            shm_offset,
            if rw {
                Protection::read_write()
            } else {
                Protection::read()
            },
            hypervisor::MemCacheType::CacheCoherent,
        ) {
            Ok(()) => Ok(shm_offset),
            Err(e) => {
                base::error!("failed to map memory buffer: {:#}", e);
                Err(libc::EINVAL)
            }
        }
    }

    fn remove_mapping(&mut self, offset: u64) -> Result<(), i32> {
        let _ = self.allocator.release_containing(offset);

        self.shm_mapper
            .remove_mapping(offset)
            .map_err(|_| libc::EINVAL)
    }
}

/// Direct linear mapping of sparse guest memory.
///
/// A re-mapping of sparse guest memory into an arena that is linear to the host.
struct GuestMemoryMapping {
    arena: MemoryMappingArena,
    start_offset: usize,
}

impl GuestMemoryMapping {
    fn new(mem: &GuestMemory, sgs: &[SgEntry]) -> anyhow::Result<Self> {
        let page_size = base::pagesize() as u64;
        let page_mask = page_size - 1;

        // Validate the SGs.
        //
        // We can only map full pages and need to maintain a linear area. This means that the
        // following invariants must be upheld:
        //
        // - For all entries but the first, the start offset within the page must be 0.
        // - For all entries but the last, `start + len` must be a multiple of the page size.
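        //
        // For example (illustrative values with a page size of 0x1000), the following list is
        // valid:
        //
        //   [ { start: 0x1200, len: 0xe00 },
        //     { start: 0x5000, len: 0x2000 },
        //     { start: 0x9000, len: 0x123 } ]
        //
        // The first entry ends exactly on a page boundary (0x1200 + 0xe00 == 0x2000), the later
        // entries start on page boundaries, and only the last entry is allowed to end mid-page.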
        for sg in sgs.iter().skip(1) {
            if sg.start & page_mask != 0 {
                anyhow::bail!("non-initial SG entry start offset is not 0");
            }
        }
        for sg in sgs.iter().take(sgs.len() - 1) {
            if (sg.start + sg.len as u64) & page_mask != 0 {
                anyhow::bail!("non-terminal SG entry with start + len not aligned to page size");
            }
        }

        // Compute the arena size.
        let arena_size = sgs
            .iter()
            .fold(0, |size, sg| size + (sg.start & page_mask) + sg.len as u64)
            // Align to page size if the last entry did not cover a full page.
            .next_multiple_of(page_size);
        let mut arena = MemoryMappingArena::new(arena_size as usize)?;

        // Map all SG entries.
        let mut pos = 0;
        for region in sgs {
            // Address of the first page of the region.
            let region_first_page = region.start & !page_mask;
            let len = region.start - region_first_page + region.len as u64;
            // Make sure to map whole pages (only necessary for the last entry).
            let len = len.next_multiple_of(page_size) as usize;
            // TODO: find the offset from the region; this assumes a single guest memory region
            // starting at address 0.
            let fd = mem.offset_region(region_first_page)?;
            // Always map whole pages
            arena.add_fd_offset(pos, len, fd, region_first_page)?;

            pos += len;
        }

        let start_offset = sgs
            .first()
            .map(|region| region.start & page_mask)
            .unwrap_or(0) as usize;

        Ok(GuestMemoryMapping {
            arena,
            start_offset,
        })
    }
}

impl GuestMemoryRange for GuestMemoryMapping {
    fn as_ptr(&self) -> *const u8 {
        // SAFETY: the arena has a valid pointer that covers `start_offset + len`.
        unsafe { self.arena.as_ptr().add(self.start_offset) }
    }

    fn as_mut_ptr(&mut self) -> *mut u8 {
        // SAFETY: the arena has a valid pointer that covers `start_offset + len`.
        unsafe { self.arena.as_ptr().add(self.start_offset) }
    }
}

/// Copy of sparse guest memory that is written back upon destruction.
///
/// Contrary to `GuestMemoryMapping` which re-maps guest memory to make it appear linear to the
/// host, this copies the sparse guest memory into a linear vector that is copied back upon
/// destruction. Doing so can be faster than a costly mapping operation if the guest area is small
/// enough.
struct GuestMemoryShadowMapping {
    /// Sparse data copied from the guest.
    data: Vec<u8>,
    /// Guest memory to read from.
    mem: GuestMemory,
    /// SG entries describing the sparse guest area.
    sgs: Vec<SgEntry>,
    /// Whether the data has potentially been modified and needs to be written back to the
    /// guest.
    dirty: bool,
}

impl GuestMemoryShadowMapping {
    fn new(mem: &GuestMemory, sgs: Vec<SgEntry>) -> anyhow::Result<Self> {
        let total_size = sgs.iter().fold(0, |total, sg| total + sg.len as usize);
        let mut data = vec![0u8; total_size];
        let mut pos = 0;
        for sg in &sgs {
            mem.read_exact_at_addr(
                &mut data[pos..pos + sg.len as usize],
                GuestAddress(sg.start),
            )?;
            pos += sg.len as usize;
        }

        Ok(Self {
            data,
            mem: mem.clone(),
            sgs,
            dirty: false,
        })
    }
}

impl GuestMemoryRange for GuestMemoryShadowMapping {
    fn as_ptr(&self) -> *const u8 {
        self.data.as_ptr()
    }

    fn as_mut_ptr(&mut self) -> *mut u8 {
        self.dirty = true;
        self.data.as_mut_ptr()
    }
}

/// Write the potentially modified shadow buffer back into the guest memory.
impl Drop for GuestMemoryShadowMapping {
    fn drop(&mut self) {
        // No need to copy back if no modification has been done.
        if !self.dirty {
            return;
        }

        let mut pos = 0;
        for sg in &self.sgs {
            if let Err(e) = self.mem.write_all_at_addr(
                &self.data[pos..pos + sg.len as usize],
                GuestAddress(sg.start),
            ) {
                base::error!("failed to write back guest memory shadow mapping: {:#}", e);
            }
            pos += sg.len as usize;
        }
    }
}

/// A chunk of guest memory which can be either directly mapped, or copied into a shadow buffer.
enum GuestMemoryChunk {
    Mapping(GuestMemoryMapping),
    Shadow(GuestMemoryShadowMapping),
}

impl GuestMemoryRange for GuestMemoryChunk {
    fn as_ptr(&self) -> *const u8 {
        match self {
            GuestMemoryChunk::Mapping(m) => m.as_ptr(),
            GuestMemoryChunk::Shadow(s) => s.as_ptr(),
        }
    }

    fn as_mut_ptr(&mut self) -> *mut u8 {
        match self {
            GuestMemoryChunk::Mapping(m) => m.as_mut_ptr(),
            GuestMemoryChunk::Shadow(s) => s.as_mut_ptr(),
        }
    }
}

/// Newtype to implement `VirtioMediaGuestMemoryMapper` on `GuestMemory`.
///
/// Whether to use a direct mapping or to copy the guest data into a shadow buffer is decided by
/// the size of the guest mapping. If it is below `MAPPING_THRESHOLD`, a shadow buffer is used;
/// otherwise the area is mapped.
struct GuestMemoryMapper(GuestMemory);

impl VirtioMediaGuestMemoryMapper for GuestMemoryMapper {
    type GuestMemoryMapping = GuestMemoryChunk;

    fn new_mapping(&self, sgs: Vec<SgEntry>) -> anyhow::Result<Self::GuestMemoryMapping> {
        /// Threshold at which we perform a direct mapping of the guest memory into the host.
        /// Anything below that is copied into a shadow buffer and synced back to the guest when
        /// the memory chunk is destroyed.
        const MAPPING_THRESHOLD: usize = 0x400;
        let total_size = sgs.iter().fold(0, |total, sg| total + sg.len as usize);

        if total_size >= MAPPING_THRESHOLD {
            GuestMemoryMapping::new(&self.0, &sgs).map(GuestMemoryChunk::Mapping)
        } else {
            GuestMemoryShadowMapping::new(&self.0, sgs).map(GuestMemoryChunk::Shadow)
        }
    }
}

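/// Tokens for the events the device worker waits on.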
#[derive(EventToken, Debug)]
enum Token {
    CommandQueue,
    V4l2Session(u32),
    Kill,
}

/// Newtype to implement `SessionPoller` on `Rc<WaitContext<Token>>`.
#[derive(Clone)]
struct WaitContextPoller(Rc<WaitContext<Token>>);

impl SessionPoller for WaitContextPoller {
    fn add_session(&self, session: BorrowedFd, session_id: u32) -> Result<(), i32> {
        self.0
            .add_for_event(
                &Descriptor(session.as_raw_fd()),
                EventType::Read,
                Token::V4l2Session(session_id),
            )
            .map_err(|e| e.errno())
    }

    fn remove_session(&self, session: BorrowedFd) {
        let _ = self.0.delete(&Descriptor(session.as_raw_fd()));
    }
}

/// Worker to operate a virtio-media device inside a worker thread.
struct Worker<D: VirtioMediaDevice<Reader, Writer>> {
    runner: VirtioMediaDeviceRunner<Reader, Writer, D, WaitContextPoller>,
    cmd_queue: Queue,
    wait_ctx: Rc<WaitContext<Token>>,
}

impl<D> Worker<D>
where
    D: VirtioMediaDevice<Reader, Writer>,
{
    /// Create a new worker instance for `device`.
    fn new(
        device: D,
        cmd_queue: Queue,
        kill_evt: Event,
        wait_ctx: Rc<WaitContext<Token>>,
    ) -> anyhow::Result<Self> {
        wait_ctx
            .add_many(&[
                (cmd_queue.event(), Token::CommandQueue),
                (&kill_evt, Token::Kill),
            ])
            .context("when adding worker events to wait context")?;

        Ok(Self {
            runner: VirtioMediaDeviceRunner::new(device, WaitContextPoller(Rc::clone(&wait_ctx))),
            cmd_queue,
            wait_ctx,
        })
    }

    fn run(&mut self) -> anyhow::Result<()> {
        loop {
            let wait_events = self.wait_ctx.wait().context("Wait error")?;

            for wait_event in wait_events.iter() {
                match wait_event.token {
                    Token::CommandQueue => {
                        let _ = self.cmd_queue.event().wait();
                        while let Some(mut desc) = self.cmd_queue.pop() {
                            self.runner
                                .handle_command(&mut desc.reader, &mut desc.writer);
                            // Return the descriptor to the guest.
                            let written = desc.writer.bytes_written() as u32;
                            self.cmd_queue.add_used(desc, written);
                            self.cmd_queue.trigger_interrupt();
                        }
                    }
                    Token::Kill => {
                        return Ok(());
                    }
                    Token::V4l2Session(session_id) => {
                        let session = match self.runner.sessions.get_mut(&session_id) {
                            Some(session) => session,
                            None => {
                                base::error!(
                                    "received event for non-registered session {}",
                                    session_id
                                );
                                continue;
                            }
                        };

                        if let Err(e) = self.runner.device.process_events(session) {
                            base::error!(
                                "error while processing events for session {}: {:#}",
                                session_id,
                                e
                            );
                            if let Some(session) = self.runner.sessions.remove(&session_id) {
                                self.runner.device.close_session(session);
                            }
                        }
                    }
                }
            }
        }
    }
}

/// Implements the required traits to operate a [`VirtioMediaDevice`] under crosvm.
struct CrosvmVirtioMediaDevice<
    D: VirtioMediaDevice<Reader, Writer>,
    F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>,
> {
    /// Closure to create the device once all its resources are acquired.
    create_device: F,
    /// Virtio configuration area.
    config: VirtioMediaDeviceConfig,

    /// Virtio device features.
    base_features: u64,
    /// Mapper to make host video buffers visible to the guest.
    ///
    /// We unfortunately need to put it behind an `Arc` because the mapper is only passed once,
    /// whereas the device can be activated several times, so we need to keep a reference to it
    /// even after it is passed to the device.
    shm_mapper: Option<ArcedMemoryMapper>,
    /// Worker thread for the device.
    worker_thread: Option<WorkerThread<()>>,
}

impl<D, F> CrosvmVirtioMediaDevice<D, F>
where
    D: VirtioMediaDevice<Reader, Writer>,
    F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>,
{
    fn new(base_features: u64, config: VirtioMediaDeviceConfig, create_device: F) -> Self {
        Self {
            base_features,
            config,
            shm_mapper: None,
            create_device,
            worker_thread: None,
        }
    }
}

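/// Size of the host mapper's address range, which is also the size of the device's shared memory
/// region: a full 32-bit address space.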
const HOST_MAPPER_RANGE: u64 = 1 << 32;

impl<D, F> VirtioDevice for CrosvmVirtioMediaDevice<D, F>
where
    D: VirtioMediaDevice<Reader, Writer> + Send + 'static,
    F: Fn(EventQueue, GuestMemoryMapper, HostMemoryMapper<ArcedMemoryMapper>) -> anyhow::Result<D>
        + Send,
{
    fn keep_rds(&self) -> Vec<base::RawDescriptor> {
        let mut keep_rds = Vec::new();

        if let Some(fd) = self.shm_mapper.as_ref().and_then(|m| m.as_raw_descriptor()) {
            keep_rds.push(fd);
        }

        keep_rds
    }

    fn device_type(&self) -> DeviceType {
        DeviceType::Media
    }

    fn queue_max_sizes(&self) -> &[u16] {
        QUEUE_SIZES
    }

    fn features(&self) -> u64 {
        self.base_features
    }

    fn read_config(&self, offset: u64, data: &mut [u8]) {
        copy_config(data, 0, self.config.as_ref(), offset);
    }

    fn activate(
        &mut self,
        mem: vm_memory::GuestMemory,
        _interrupt: Interrupt,
        mut queues: BTreeMap<usize, Queue>,
    ) -> anyhow::Result<()> {
        if queues.len() != QUEUE_SIZES.len() {
            anyhow::bail!(
                "wrong number of queues passed: expected {}, actual {}",
                QUEUE_SIZES.len(),
                queues.len()
            );
        }

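        // Queue 0 is the command queue and queue 1 the event queue.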
        let cmd_queue = queues.remove(&0).context("missing queue 0")?;
        let event_queue = EventQueue(queues.remove(&1).context("missing queue 1")?);

        let shm_mapper = self
            .shm_mapper
            .clone()
            .take()
            .context("shared memory mapper was not specified")?;

        let wait_ctx = WaitContext::new()?;
        let device = (self.create_device)(
            event_queue,
            GuestMemoryMapper(mem),
            HostMemoryMapper {
                shm_mapper,
                allocator: AddressAllocator::new(
                    AddressRange::from_start_and_end(0, HOST_MAPPER_RANGE - 1),
                    Some(base::pagesize() as u64),
                    None,
                )?,
            },
        )?;

        let worker_thread = WorkerThread::start("v_media_worker", move |e| {
            let wait_ctx = Rc::new(wait_ctx);
            let mut worker = match Worker::new(device, cmd_queue, e, wait_ctx) {
                Ok(worker) => worker,
                Err(e) => {
                    error!("failed to create virtio-media worker: {:#}", e);
                    return;
                }
            };
            if let Err(e) = worker.run() {
                error!("virtio_media worker exited with error: {:#}", e);
            }
        });

        self.worker_thread = Some(worker_thread);
        Ok(())
    }

    fn reset(&mut self) -> anyhow::Result<()> {
        if let Some(worker_thread) = self.worker_thread.take() {
            worker_thread.stop();
        }

        Ok(())
    }

    fn get_shared_memory_region(&self) -> Option<SharedMemoryRegion> {
        Some(SharedMemoryRegion {
            id: 0,
            // We need a 32-bit address space as m2m devices start their CAPTURE buffers' offsets
            // at 2GB.
            length: HOST_MAPPER_RANGE,
        })
    }

    fn set_shared_memory_mapper(&mut self, mapper: Box<dyn SharedMemoryMapper>) {
        self.shm_mapper = Some(ArcedMemoryMapper::from(mapper));
    }
}

/// Create a simple media capture device.
///
/// This device can only generate a fixed pattern at a fixed resolution, and should only be used
/// for checking that the virtio-media pipeline is working properly.
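///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); `features` is assumed to be the base
/// virtio feature bits negotiated by the VMM:
///
/// ```ignore
/// let device: Box<dyn VirtioDevice> = create_virtio_media_simple_capture_device(features);
/// ```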
pub fn create_virtio_media_simple_capture_device(features: u64) -> Box<dyn VirtioDevice> {
    use virtio_media::devices::SimpleCaptureDevice;
    use virtio_media::v4l2r::ioctl::Capabilities;

    let mut card = [0u8; 32];
    let card_name = "simple_device";
    card[0..card_name.len()].copy_from_slice(card_name.as_bytes());

    let device = CrosvmVirtioMediaDevice::new(
        features,
        VirtioMediaDeviceConfig {
            device_caps: (Capabilities::VIDEO_CAPTURE | Capabilities::STREAMING).bits(),
            // VFL_TYPE_VIDEO
            device_type: 0,
            card,
        },
        |event_queue, _, host_mapper| Ok(SimpleCaptureDevice::new(event_queue, host_mapper)),
    );

    Box::new(device)
}

/// Create a proxy device for a host V4L2 device.
///
/// Since V4L2 is a Linux-specific API, this is only available on Linux targets.
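///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); the device path is an arbitrary example and
/// `features` is assumed to be the base virtio feature bits negotiated by the VMM:
///
/// ```ignore
/// let device = create_virtio_media_v4l2_proxy_device(features, "/dev/video0")?;
/// ```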
#[cfg(any(target_os = "android", target_os = "linux"))]
pub fn create_virtio_media_v4l2_proxy_device<P: AsRef<Path>>(
    features: u64,
    device_path: P,
) -> anyhow::Result<Box<dyn VirtioDevice>> {
    use virtio_media::devices::V4l2ProxyDevice;
    use virtio_media::v4l2r;
    use virtio_media::v4l2r::ioctl::Capabilities;

    let device = v4l2r::device::Device::open(
        device_path.as_ref(),
        v4l2r::device::DeviceConfig::new().non_blocking_dqbuf(),
    )?;
    let mut device_caps = device.caps().device_caps();

    // We are only exposing one device's worth of capabilities.
    device_caps.remove(Capabilities::DEVICE_CAPS);

    // Read-write is not supported by design.
    device_caps.remove(Capabilities::READWRITE);

    let mut config = VirtioMediaDeviceConfig {
        device_caps: device_caps.bits(),
        // VFL_TYPE_VIDEO
        device_type: 0,
        card: Default::default(),
    };
    let card = &device.caps().card;
    let name_slice = card[0..std::cmp::min(card.len(), config.card.len())].as_bytes();
    config.card.as_mut_slice()[0..name_slice.len()].copy_from_slice(name_slice);
    let device_path = PathBuf::from(device_path.as_ref());

    let device = CrosvmVirtioMediaDevice::new(
        features,
        config,
        move |event_queue, guest_mapper, host_mapper| {
            let device =
                V4l2ProxyDevice::new(device_path.clone(), event_queue, guest_mapper, host_mapper);

            Ok(device)
        },
    );

    Ok(Box::new(device))
}

/// Create a decoder adapter device.
///
/// This is a regular virtio-media decoder device leveraging the virtio-video decoder backends.
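///
/// # Example
///
/// A minimal usage sketch (not compiled as a doctest); `features` and `gpu_tube` are assumed to be
/// provided by the VMM setup, and the available backends depend on the enabled cargo features:
///
/// ```ignore
/// let device =
///     create_virtio_media_decoder_adapter_device(features, gpu_tube, VideoBackendType::Ffmpeg)?;
/// ```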
#[cfg(feature = "video-decoder")]
pub fn create_virtio_media_decoder_adapter_device(
    features: u64,
    _gpu_tube: base::Tube,
    backend: VideoBackendType,
) -> anyhow::Result<Box<dyn VirtioDevice>> {
    use decoder_adapter::VirtioVideoAdapter;
    use virtio_media::devices::video_decoder::VideoDecoder;
    use virtio_media::v4l2r::ioctl::Capabilities;

    #[cfg(feature = "ffmpeg")]
    use crate::virtio::video::decoder::backend::ffmpeg::FfmpegDecoder;
    #[cfg(feature = "vaapi")]
    use crate::virtio::video::decoder::backend::vaapi::VaapiDecoder;
    #[cfg(feature = "libvda")]
    use crate::virtio::video::decoder::backend::vda::LibvdaDecoder;
    use crate::virtio::video::decoder::DecoderBackend;

    let mut card = [0u8; 32];
    let card_name = format!("{:?} decoder adapter", backend).to_lowercase();
    card[0..card_name.len()].copy_from_slice(card_name.as_bytes());
    let config = VirtioMediaDeviceConfig {
        device_caps: (Capabilities::VIDEO_M2M_MPLANE | Capabilities::STREAMING).bits(),
        // VFL_TYPE_VIDEO
        device_type: 0,
        card,
    };

    let create_device = move |event_queue, _, host_mapper: HostMemoryMapper<ArcedMemoryMapper>| {
        let backend = match backend {
            #[cfg(feature = "libvda")]
            VideoBackendType::Libvda => {
                LibvdaDecoder::new(libvda::decode::VdaImplType::Gavda)?.into_trait_object()
            }
            #[cfg(feature = "libvda")]
            VideoBackendType::LibvdaVd => {
                LibvdaDecoder::new(libvda::decode::VdaImplType::Gavd)?.into_trait_object()
            }
            #[cfg(feature = "vaapi")]
            VideoBackendType::Vaapi => VaapiDecoder::new()?.into_trait_object(),
            #[cfg(feature = "ffmpeg")]
            VideoBackendType::Ffmpeg => FfmpegDecoder::new().into_trait_object(),
        };

        let adapter = VirtioVideoAdapter::new(backend);
        let decoder = VideoDecoder::new(adapter, event_queue, host_mapper);

        Ok(decoder)
    };

    Ok(Box::new(CrosvmVirtioMediaDevice::new(
        features,
        config,
        create_device,
    )))
}