• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The ChromiumOS Authors
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Implementation of a virtio video decoder backed by a device.
6 
7 use std::collections::btree_map::Entry;
8 use std::collections::BTreeMap;
9 use std::collections::BTreeSet;
10 use std::collections::VecDeque;
11 
12 pub use backend::DecoderBackend;
13 use backend::*;
14 use base::error;
15 use base::AsRawDescriptor;
16 use base::Descriptor;
17 use base::SafeDescriptor;
18 use base::Tube;
19 use base::WaitContext;
20 use vm_memory::GuestMemory;
21 
22 use crate::virtio::video::async_cmd_desc_map::AsyncCmdDescMap;
23 use crate::virtio::video::command::QueueType;
24 use crate::virtio::video::command::VideoCmd;
25 use crate::virtio::video::control::CtrlType;
26 use crate::virtio::video::control::CtrlVal;
27 use crate::virtio::video::control::QueryCtrlType;
28 use crate::virtio::video::device::*;
29 use crate::virtio::video::error::*;
30 use crate::virtio::video::event::*;
31 use crate::virtio::video::format::*;
32 use crate::virtio::video::params::Params;
33 use crate::virtio::video::protocol;
34 use crate::virtio::video::resource::*;
35 use crate::virtio::video::response::CmdResponse;
36 
37 pub mod backend;
38 pub mod capability;
39 
40 use capability::*;
41 
// Identifiers used by the virtio-video protocol; streams and resources are both
// referred to by 32-bit IDs chosen by the guest driver.
type StreamId = u32;
type ResourceId = u32;

// ResourceId given by the driver, specialized per queue direction for readability.
type InputResourceId = u32;
type OutputResourceId = u32;

// Id for a frame buffer passed to Chrome.
// We cannot use OutputResourceId as is because this ID must be between 0 and ((# of buffers) - 1).
//
// TODO(b/1518105): Once we decide to generate resource_id in the device side,
// we don't need this value and can pass OutputResourceId to Chrome directly.
type FrameBufferId = i32;
55 
// The result of OutputResources.queue_resource().
enum QueueOutputResourceResult {
    // The resource is kept as the EOS buffer and must not be sent to the host.
    UsingAsEos,
    // The resource has been registered before; carries its existing frame buffer ID.
    Reused(FrameBufferId),
    // The resource is queued for the first time; carries its newly assigned frame buffer ID.
    Registered(FrameBufferId),
}
62 
/// An input (bitstream) buffer registered by the guest, together with the offset at which
/// its payload begins.
struct InputResource {
    /// The actual underlying resource.
    resource: GuestResource,
    /// Offset from `resource` from which data starts.
    offset: u32,
}
69 
/// Maps an input resource ID to the underlying resource and its useful information.
type InputResources = BTreeMap<InputResourceId, InputResource>;
72 
/// Book-keeping for the output (frame) buffers of one stream.
///
/// Tracks the bidirectional mapping between guest output resource IDs and the zero-based
/// frame buffer IDs handed to the backend, which resources are currently queued, and the
/// resource reserved for signaling end-of-stream.
#[derive(Default)]
struct OutputResources {
    // OutputResourceId <-> FrameBufferId
    res_id_to_frame_buf_id: BTreeMap<OutputResourceId, FrameBufferId>,
    frame_buf_id_to_res_id: BTreeMap<FrameBufferId, OutputResourceId>,

    // Store the resource id of the queued output buffers.
    queued_res_ids: BTreeSet<OutputResourceId>,

    // Reserves output resource ID that will be used to notify EOS.
    // If a guest enqueues a resource with this ID, the resource must not be sent to the host.
    // Once the value is set, it won't be changed until resolution is changed or a stream is
    // destroyed.
    eos_resource_id: Option<OutputResourceId>,

    // This is a flag that shows whether the device's set_output_parameters has called.
    // This will be set to true when ResourceQueue for OutputBuffer is called for the first time.
    //
    // TODO(b/1518105): This field is added as a hack because the current virtio-video v3 spec
    // doesn't have a way to send a number of frame buffers the guest provides.
    // Once we have the way in the virtio-video protocol, we should remove this flag.
    output_params_set: bool,

    // OutputResourceId -> ResourceHandle
    res_id_to_res_handle: BTreeMap<OutputResourceId, GuestResource>,

    // Maps the resource_id of an output buffer to its descriptor, for output buffers that may be
    // accessed by the guest CPU which we need to poll for completion before passing to the guest.
    res_id_to_descriptor: BTreeMap<OutputResourceId, SafeDescriptor>,
}
103 
104 impl OutputResources {
queue_resource( &mut self, resource_id: OutputResourceId, ) -> VideoResult<QueueOutputResourceResult>105     fn queue_resource(
106         &mut self,
107         resource_id: OutputResourceId,
108     ) -> VideoResult<QueueOutputResourceResult> {
109         if !self.queued_res_ids.insert(resource_id) {
110             error!("resource_id {} is already queued", resource_id);
111             return Err(VideoError::InvalidParameter);
112         }
113 
114         // Stores an output buffer to notify EOS.
115         // This is necessary because libvda is unable to indicate EOS along with returned buffers.
116         // For now, when a `Flush()` completes, this saved resource will be returned as a zero-sized
117         // buffer with the EOS flag.
118         // TODO(b/149725148): Remove this when libvda supports buffer flags.
119         if *self.eos_resource_id.get_or_insert(resource_id) == resource_id {
120             return Ok(QueueOutputResourceResult::UsingAsEos);
121         }
122 
123         Ok(match self.res_id_to_frame_buf_id.entry(resource_id) {
124             Entry::Occupied(e) => QueueOutputResourceResult::Reused(*e.get()),
125             Entry::Vacant(_) => {
126                 let buffer_id = self.res_id_to_frame_buf_id.len() as FrameBufferId;
127                 self.res_id_to_frame_buf_id.insert(resource_id, buffer_id);
128                 self.frame_buf_id_to_res_id.insert(buffer_id, resource_id);
129                 QueueOutputResourceResult::Registered(buffer_id)
130             }
131         })
132     }
133 
dequeue_frame_buffer( &mut self, buffer_id: FrameBufferId, stream_id: StreamId, ) -> Option<ResourceId>134     fn dequeue_frame_buffer(
135         &mut self,
136         buffer_id: FrameBufferId,
137         stream_id: StreamId,
138     ) -> Option<ResourceId> {
139         let resource_id = match self.frame_buf_id_to_res_id.get(&buffer_id) {
140             Some(id) => *id,
141             None => {
142                 error!(
143                     "unknown frame buffer id {} for stream {}",
144                     buffer_id, stream_id
145                 );
146                 return None;
147             }
148         };
149 
150         self.queued_res_ids.take(&resource_id).or_else(|| {
151             error!(
152                 "resource_id {} is not enqueued for stream {}",
153                 resource_id, stream_id
154             );
155             None
156         })
157     }
158 
dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId>159     fn dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId> {
160         self.queued_res_ids.take(&self.eos_resource_id?)
161     }
162 
output_params_set(&mut self) -> bool163     fn output_params_set(&mut self) -> bool {
164         if !self.output_params_set {
165             self.output_params_set = true;
166             return true;
167         }
168         false
169     }
170 }
171 
/// Decoder events that have been received from the backend but not yet turned into
/// responses for the guest.
enum PendingResponse {
    /// A decoded frame is ready in the backend frame buffer with this id.
    PictureReady {
        picture_buffer_id: i32,
        timestamp: u64,
    },
    /// The backend completed a flush; the reserved EOS buffer can now be returned.
    FlushCompleted,
    // Signals that we need to block on the `Descriptor` before processing further events.
    BufferBarrier(Descriptor),
    // Signals that we are currently blocking on the `Descriptor`.
    PollingBufferBarrier(Descriptor),
}
183 
// Context is associated with one `DecoderSession`, which corresponds to one stream from the
// virtio-video's point of view.
struct Context<S: DecoderSession> {
    stream_id: StreamId,

    // Current input (bitstream) and output (frame) queue parameters.
    in_params: Params,
    out_params: Params,

    // Resource bookkeeping for each queue direction.
    in_res: InputResources,
    out_res: OutputResources,

    // Set the flag when we ask the decoder reset, and unset when the reset is done.
    is_resetting: bool,

    // Backend events awaiting conversion into guest responses, in arrival order.
    pending_responses: VecDeque<PendingResponse>,

    // Created lazily on the first `ResourceCreate` (see `Decoder::create_resource`).
    session: Option<S>,
}
202 
impl<S: DecoderSession> Context<S> {
    /// Creates a context for stream `stream_id` that will decode bitstream format `format`.
    ///
    /// Parameters start from defaults (640x480, NV12 output, 1 MiB single-plane input buffer)
    /// and are overwritten by `handle_provide_picture_buffers` once the backend reports the
    /// actual stream resolution.
    fn new(
        stream_id: StreamId,
        format: Format,
        in_resource_type: ResourceType,
        out_resource_type: ResourceType,
    ) -> Self {
        const DEFAULT_WIDTH: u32 = 640;
        const DEFAULT_HEIGHT: u32 = 480;
        const DEFAULT_INPUT_BUFFER_SIZE: u32 = 1024 * 1024;

        // unwrap(): assumed to always succeed for NV12 at a fixed non-zero resolution —
        // TODO confirm `get_plane_layout` cannot fail for these inputs.
        let out_plane_formats =
            PlaneFormat::get_plane_layout(Format::NV12, DEFAULT_WIDTH, DEFAULT_HEIGHT).unwrap();

        Context {
            stream_id,
            in_params: Params {
                format: Some(format),
                frame_width: DEFAULT_WIDTH,
                frame_height: DEFAULT_HEIGHT,
                resource_type: in_resource_type,
                min_buffers: 1,
                max_buffers: 32,
                plane_formats: vec![PlaneFormat {
                    plane_size: DEFAULT_INPUT_BUFFER_SIZE,
                    ..Default::default()
                }],
                ..Default::default()
            },
            out_params: Params {
                // Only NV12 is supported as the output frame format.
                format: Some(Format::NV12),
                frame_width: DEFAULT_WIDTH,
                frame_height: DEFAULT_HEIGHT,
                resource_type: out_resource_type,
                plane_formats: out_plane_formats,
                ..Default::default()
            },
            in_res: Default::default(),
            out_res: Default::default(),
            is_resetting: false,
            pending_responses: Default::default(),
            session: None,
        }
    }

    /// Drains `pending_responses` into guest-facing event responses.
    ///
    /// Conversion stops at the first buffer barrier. If a `BufferBarrier` then sits at the
    /// front of the queue, its descriptor is registered with `wait_ctx` and the entry is
    /// replaced with `PollingBufferBarrier`; if registration fails, the barrier is dropped
    /// and the error only logged.
    fn output_pending_responses(
        &mut self,
        wait_ctx: &WaitContext<Token>,
    ) -> Vec<VideoEvtResponseType> {
        let mut event_responses = vec![];
        while let Some(mut responses) = self.output_pending_response() {
            event_responses.append(&mut responses);
        }

        // Check whether the next response is a buffer barrier we need to poll on.
        if let Some(PendingResponse::BufferBarrier(desc)) = self.pending_responses.front() {
            // Duplicate the raw descriptor so the entry can be popped while we still refer
            // to it.
            let desc = Descriptor(desc.as_raw_descriptor());
            self.pending_responses.pop_front();
            match wait_ctx.add(&desc, Token::BufferBarrier { id: self.stream_id }) {
                Ok(()) => self
                    .pending_responses
                    .push_front(PendingResponse::PollingBufferBarrier(desc)),
                Err(e) => {
                    error!("failed to add buffer FD to wait context, returning uncompleted buffer: {:#}", e)
                }
            }
        }

        event_responses
    }

    /// Converts the pending response at the front of the queue, if any, into guest responses.
    ///
    /// Returns `None` when the queue is empty, when the front entry is a buffer barrier
    /// (processing must wait for it), or when the response cannot be completed (e.g. its
    /// frame buffer id is unknown). The front entry is popped only on success; an early `?`
    /// return leaves it queued.
    fn output_pending_response(&mut self) -> Option<Vec<VideoEvtResponseType>> {
        let responses = match self.pending_responses.front()? {
            PendingResponse::BufferBarrier(_) | PendingResponse::PollingBufferBarrier(_) => {
                return None
            }
            PendingResponse::PictureReady {
                picture_buffer_id,
                timestamp,
            } => {
                // Map the backend's frame buffer id back to the guest resource it belongs to.
                let resource_id = self
                    .out_res
                    .dequeue_frame_buffer(*picture_buffer_id, self.stream_id)?;

                vec![VideoEvtResponseType::AsyncCmd(
                    AsyncCmdResponse::from_response(
                        AsyncCmdTag::Queue {
                            stream_id: self.stream_id,
                            queue_type: QueueType::Output,
                            resource_id,
                        },
                        CmdResponse::ResourceQueue {
                            timestamp: *timestamp,
                            // TODO(b/149725148): Set buffer flags once libvda exposes them.
                            flags: 0,
                            // `size` is only used for the encoder.
                            size: 0,
                        },
                    ),
                )]
            }
            PendingResponse::FlushCompleted => {
                // Return the reserved EOS resource as a zero-sized buffer carrying the EOS
                // flag, then complete the pending Drain command.
                let eos_resource_id = self.out_res.dequeue_eos_resource_id()?;
                let eos_tag = AsyncCmdTag::Queue {
                    stream_id: self.stream_id,
                    queue_type: QueueType::Output,
                    resource_id: eos_resource_id,
                };
                let eos_response = CmdResponse::ResourceQueue {
                    timestamp: 0,
                    flags: protocol::VIRTIO_VIDEO_BUFFER_FLAG_EOS,
                    size: 0,
                };
                vec![
                    VideoEvtResponseType::AsyncCmd(AsyncCmdResponse::from_response(
                        eos_tag,
                        eos_response,
                    )),
                    VideoEvtResponseType::AsyncCmd(AsyncCmdResponse::from_response(
                        AsyncCmdTag::Drain {
                            stream_id: self.stream_id,
                        },
                        CmdResponse::NoData,
                    )),
                ]
            }
        };
        // unwrap(): front() returned Some above, so the queue cannot be empty here.
        self.pending_responses.pop_front().unwrap();

        Some(responses)
    }

    /// Records `resource` under `resource_id` in the bookkeeping of the given queue.
    ///
    /// Input resources keep the data-start `offset`; output resources are stored as handles
    /// until first queued (`offset` is unused for them).
    fn register_resource(
        &mut self,
        queue_type: QueueType,
        resource_id: u32,
        resource: GuestResource,
        offset: u32,
    ) {
        match queue_type {
            QueueType::Input => {
                self.in_res
                    .insert(resource_id, InputResource { resource, offset });
            }
            QueueType::Output => {
                self.out_res
                    .res_id_to_res_handle
                    .insert(resource_id, resource);
            }
        };
    }

    /*
     * Functions handling decoder events.
     */

    /// Updates input/output parameters after the backend requested `min_num_buffers` output
    /// buffers of `width`x`height` with the given visible rectangle.
    fn handle_provide_picture_buffers(
        &mut self,
        min_num_buffers: u32,
        width: i32,
        height: i32,
        visible_rect: Rect,
    ) {
        // We only support NV12.
        let format = Some(Format::NV12);

        // unwrap(): assumed to always succeed for NV12 — TODO confirm `get_plane_layout`
        // cannot fail for the resolutions the backend reports.
        let plane_formats =
            PlaneFormat::get_plane_layout(Format::NV12, width as u32, height as u32).unwrap();

        self.in_params.frame_width = width as u32;
        self.in_params.frame_height = height as u32;

        self.out_params = Params {
            format,
            // The resource type is not changed by a provide picture buffers event.
            resource_type: self.out_params.resource_type,
            // Note that rect_width is sometimes smaller.
            frame_width: width as u32,
            frame_height: height as u32,
            // Adding 1 to `min_buffers` to reserve a resource for `eos_resource_id`.
            min_buffers: min_num_buffers + 1,
            max_buffers: 32,
            crop: Crop {
                left: visible_rect.left as u32,
                top: visible_rect.top as u32,
                width: (visible_rect.right - visible_rect.left) as u32,
                height: (visible_rect.bottom - visible_rect.top) as u32,
            },
            plane_formats,
            // No need to set `frame_rate`, as it's only for the encoder.
            ..Default::default()
        };
    }
}
397 
/// A thin wrapper of a map of contexts with error handlings.
struct ContextMap<S: DecoderSession> {
    // Per-stream contexts, keyed by stream ID.
    map: BTreeMap<StreamId, Context<S>>,
}
402 
403 impl<S: DecoderSession> ContextMap<S> {
insert(&mut self, ctx: Context<S>) -> VideoResult<()>404     fn insert(&mut self, ctx: Context<S>) -> VideoResult<()> {
405         match self.map.entry(ctx.stream_id) {
406             Entry::Vacant(e) => {
407                 e.insert(ctx);
408                 Ok(())
409             }
410             Entry::Occupied(_) => {
411                 error!("session {} already exists", ctx.stream_id);
412                 Err(VideoError::InvalidStreamId(ctx.stream_id))
413             }
414         }
415     }
416 
get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>>417     fn get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>> {
418         self.map.get(stream_id).ok_or_else(|| {
419             error!("failed to get context of stream {}", *stream_id);
420             VideoError::InvalidStreamId(*stream_id)
421         })
422     }
423 
get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>>424     fn get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>> {
425         self.map.get_mut(stream_id).ok_or_else(|| {
426             error!("failed to get context of stream {}", *stream_id);
427             VideoError::InvalidStreamId(*stream_id)
428         })
429     }
430 }
431 
// `Default` is implemented manually rather than derived because `#[derive(Default)]` would
// add an `S: Default` bound that `DecoderSession` implementors need not satisfy.
impl<S: DecoderSession> Default for ContextMap<S> {
    fn default() -> Self {
        Self {
            map: Default::default(),
        }
    }
}
439 
/// Represents information of a decoder backed by a `DecoderBackend`.
pub struct Decoder<D: DecoderBackend> {
    // Backend that creates the per-stream decoder sessions.
    decoder: D,
    // Input/output format capabilities queried from the backend at construction time.
    capability: Capability,
    // Per-stream contexts, keyed by stream ID.
    contexts: ContextMap<D::Session>,
    // Tube used to resolve virtio object resource entries into host handles.
    resource_bridge: Tube,
    // Guest memory, used to resolve guest-page-backed resource entries.
    mem: GuestMemory,
}
448 
449 impl<D: DecoderBackend> Decoder<D> {
450     /// Build a new decoder using the provided `backend`.
new(backend: D, resource_bridge: Tube, mem: GuestMemory) -> Self451     pub fn new(backend: D, resource_bridge: Tube, mem: GuestMemory) -> Self {
452         let capability = backend.get_capabilities();
453 
454         Self {
455             decoder: backend,
456             capability,
457             contexts: Default::default(),
458             resource_bridge,
459             mem,
460         }
461     }
462 
463     /*
464      * Functions processing virtio-video commands.
465      */
466 
query_capabilities(&self, queue_type: QueueType) -> CmdResponse467     fn query_capabilities(&self, queue_type: QueueType) -> CmdResponse {
468         let descs = match queue_type {
469             QueueType::Input => self.capability.input_formats().clone(),
470             QueueType::Output => self.capability.output_formats().clone(),
471         };
472 
473         CmdResponse::QueryCapability(descs)
474     }
475 
create_stream( &mut self, stream_id: StreamId, coded_format: Format, input_resource_type: ResourceType, output_resource_type: ResourceType, ) -> VideoResult<VideoCmdResponseType>476     fn create_stream(
477         &mut self,
478         stream_id: StreamId,
479         coded_format: Format,
480         input_resource_type: ResourceType,
481         output_resource_type: ResourceType,
482     ) -> VideoResult<VideoCmdResponseType> {
483         // Create an instance of `Context`.
484         // Note that the `DecoderSession` will be created not here but at the first call of
485         // `ResourceCreate`. This is because we need to fix a coded format for it, which
486         // will be set by `SetParams`.
487         self.contexts.insert(Context::new(
488             stream_id,
489             coded_format,
490             input_resource_type,
491             output_resource_type,
492         ))?;
493         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
494     }
495 
destroy_stream(&mut self, stream_id: StreamId)496     fn destroy_stream(&mut self, stream_id: StreamId) {
497         if self.contexts.map.remove(&stream_id).is_none() {
498             error!("Tried to destroy an invalid stream context {}", stream_id);
499         }
500     }
501 
create_session( decoder: &mut D, wait_ctx: &WaitContext<Token>, ctx: &Context<D::Session>, stream_id: StreamId, ) -> VideoResult<D::Session>502     fn create_session(
503         decoder: &mut D,
504         wait_ctx: &WaitContext<Token>,
505         ctx: &Context<D::Session>,
506         stream_id: StreamId,
507     ) -> VideoResult<D::Session> {
508         let format = match ctx.in_params.format {
509             Some(f) => f,
510             None => {
511                 error!("bitstream format is not specified");
512                 return Err(VideoError::InvalidParameter);
513             }
514         };
515 
516         let session = decoder.new_session(format)?;
517 
518         wait_ctx
519             .add(session.event_pipe(), Token::Event { id: stream_id })
520             .map_err(|e| {
521                 error!(
522                     "failed to add FD to poll context for session {}: {}",
523                     stream_id, e
524                 );
525                 VideoError::InvalidOperation
526             })?;
527 
528         Ok(session)
529     }
530 
    /// Registers a new resource with id `resource_id` on the `queue_type` queue of stream
    /// `stream_id`.
    ///
    /// The first call for a stream also creates its `DecoderSession`, since by then the coded
    /// format (set via `SetParams`) is final. Only single-buffer resources are supported:
    /// `plane_entries` must contain exactly one entry list.
    fn create_resource(
        &mut self,
        wait_ctx: &WaitContext<Token>,
        stream_id: StreamId,
        queue_type: QueueType,
        resource_id: ResourceId,
        plane_offsets: Vec<u32>,
        plane_entries: Vec<Vec<UnresolvedResourceEntry>>,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // Create an instance of `DecoderSession` the first time `ResourceCreate` is
        // called here.
        if ctx.session.is_none() {
            ctx.session = Some(Self::create_session(
                &mut self.decoder,
                wait_ctx,
                ctx,
                stream_id,
            )?);
        }

        // We only support single-buffer resources for now.
        let entries = if plane_entries.len() != 1 {
            return Err(VideoError::InvalidArgument);
        } else {
            // unwrap() is safe because we just tested that `plane_entries` had exactly one element.
            plane_entries.first().unwrap()
        };

        // Now try to resolve our resource.
        // The expected entry layout is dictated by the resource type of the target queue.
        let (resource_type, params) = match queue_type {
            QueueType::Input => (ctx.in_params.resource_type, &ctx.in_params),
            QueueType::Output => (ctx.out_params.resource_type, &ctx.out_params),
        };

        let resource = match resource_type {
            ResourceType::VirtioObject => {
                // Virtio object resources only have one entry.
                if entries.len() != 1 {
                    return Err(VideoError::InvalidArgument);
                }
                GuestResource::from_virtio_object_entry(
                    // SAFETY:
                    // Safe because we confirmed the correct type for the resource.
                    // unwrap() is also safe here because we just tested above that `entries` had
                    // exactly one element.
                    entries.first().unwrap().object(),
                    &self.resource_bridge,
                    params,
                )
                .map_err(|_| VideoError::InvalidArgument)?
            }
            ResourceType::GuestPages => GuestResource::from_virtio_guest_mem_entry(
                // SAFETY:
                // Safe because we confirmed the correct type for the resource.
                unsafe {
                    std::slice::from_raw_parts(
                        entries.as_ptr() as *const protocol::virtio_video_mem_entry,
                        entries.len(),
                    )
                },
                &self.mem,
                params,
            )
            .map_err(|_| VideoError::InvalidArgument)?,
        };

        // Only the first plane offset is honored; missing offsets default to 0.
        let offset = plane_offsets.first().copied().unwrap_or(0);
        ctx.register_resource(queue_type, resource_id, resource, offset);

        if queue_type == QueueType::Input {
            return Ok(VideoCmdResponseType::Sync(CmdResponse::NoData));
        };

        // We assume ResourceCreate is not called to an output resource that is already
        // imported to Chrome for now.
        // NOTE(review): this check runs after `register_resource`, so the stale handle has
        // already been overwritten by the time we error out — confirm this is intended.
        // TODO(keiichiw): We need to support this case for a guest client who may use
        // arbitrary numbers of buffers. (e.g. C2V4L2Component in ARCVM)
        // Such a client is valid as long as it uses at most 32 buffers at the same time.
        if let Some(frame_buf_id) = ctx.out_res.res_id_to_frame_buf_id.get(&resource_id) {
            error!(
                "resource {} has already been imported to Chrome as a frame buffer {}",
                resource_id, frame_buf_id
            );
            return Err(VideoError::InvalidOperation);
        }

        Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
    }
621 
destroy_all_resources( &mut self, stream_id: StreamId, queue_type: QueueType, ) -> VideoResult<VideoCmdResponseType>622     fn destroy_all_resources(
623         &mut self,
624         stream_id: StreamId,
625         queue_type: QueueType,
626     ) -> VideoResult<VideoCmdResponseType> {
627         let ctx = self.contexts.get_mut(&stream_id)?;
628 
629         // Reset the associated context.
630         match queue_type {
631             QueueType::Input => {
632                 ctx.in_res = Default::default();
633             }
634             QueueType::Output => {
635                 ctx.out_res = Default::default();
636             }
637         }
638         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
639     }
640 
queue_input_resource( &mut self, stream_id: StreamId, resource_id: ResourceId, timestamp: u64, data_sizes: Vec<u32>, ) -> VideoResult<VideoCmdResponseType>641     fn queue_input_resource(
642         &mut self,
643         stream_id: StreamId,
644         resource_id: ResourceId,
645         timestamp: u64,
646         data_sizes: Vec<u32>,
647     ) -> VideoResult<VideoCmdResponseType> {
648         let ctx = self.contexts.get_mut(&stream_id)?;
649 
650         if data_sizes.len() != 1 {
651             error!("num_data_sizes must be 1 but {}", data_sizes.len());
652             return Err(VideoError::InvalidOperation);
653         }
654 
655         let session = ctx.session.as_mut().ok_or(VideoError::InvalidOperation)?;
656 
657         let InputResource { resource, offset } =
658             ctx.in_res
659                 .get(&resource_id)
660                 .ok_or(VideoError::InvalidResourceId {
661                     stream_id,
662                     resource_id,
663                 })?;
664 
665         session.decode(
666             resource_id,
667             timestamp,
668             resource
669                 .handle
670                 .try_clone()
671                 .map_err(|_| VideoError::InvalidParameter)?,
672             *offset,
673             data_sizes[0], // bytes_used
674         )?;
675 
676         Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
677             stream_id,
678             queue_type: QueueType::Input,
679             resource_id,
680         }))
681     }
682 
    /// Queues the output resource `resource_id` of stream `stream_id` as a frame buffer.
    ///
    /// Depending on the resource's state this either reserves it as the EOS buffer, asks the
    /// backend to reuse an already-imported buffer, or imports it into the backend for the
    /// first time. Completion is reported asynchronously through an `AsyncCmdTag::Queue` on
    /// the output queue.
    fn queue_output_resource(
        &mut self,
        stream_id: StreamId,
        resource_id: ResourceId,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // Check if the current pixel format is set to NV12.
        match ctx.out_params.format {
            Some(Format::NV12) => (), // OK
            Some(f) => {
                error!(
                    "video decoder only supports NV12 as a frame format, got {}",
                    f
                );
                return Err(VideoError::InvalidOperation);
            }
            None => {
                error!("output format is not set");
                return Err(VideoError::InvalidOperation);
            }
        };

        match ctx.out_res.queue_resource(resource_id)? {
            QueueOutputResourceResult::UsingAsEos => {
                // Don't enqueue this resource to the host.
                Ok(())
            }
            QueueOutputResourceResult::Reused(buffer_id) => {
                // If a PictureReady response for this buffer is still pending, it will be
                // handed back to the guest shortly, so do not return it to the backend.
                let res = ctx.pending_responses.iter()
                    .find(|&res| {
                        matches!(res, PendingResponse::PictureReady { picture_buffer_id, .. } if *picture_buffer_id == buffer_id)
                    });

                if res.is_some() {
                    Ok(())
                } else {
                    ctx.session
                        .as_mut()
                        .ok_or(VideoError::InvalidOperation)?
                        .reuse_output_buffer(buffer_id)
                }
            }
            QueueOutputResourceResult::Registered(buffer_id) => {
                // Take full ownership of the output resource, since we will only import it once
                // into the backend.
                let resource = ctx
                    .out_res
                    .res_id_to_res_handle
                    .remove(&resource_id)
                    .ok_or(VideoError::InvalidResourceId {
                        stream_id,
                        resource_id,
                    })?;

                let session = ctx.session.as_mut().ok_or(VideoError::InvalidOperation)?;

                // For CPU-mappable virtio-object buffers, keep a duplicate of the descriptor so
                // completion can be polled (buffer barrier) before the buffer goes to the guest.
                ctx.out_res.res_id_to_descriptor.remove(&resource_id);
                if resource.guest_cpu_mappable {
                    if let GuestResourceHandle::VirtioObject(VirtioObjectHandle { desc, .. }) =
                        &resource.handle
                    {
                        let desc = desc.try_clone().map_err(|e| {
                            VideoError::BackendFailure(anyhow::anyhow!(e).context(
                                "failed to clone buffer descriptor for completion barrier",
                            ))
                        })?;
                        ctx.out_res.res_id_to_descriptor.insert(resource_id, desc);
                    }
                }

                // Set output_buffer_count before passing the first output buffer.
                if ctx.out_res.output_params_set() {
                    const OUTPUT_BUFFER_COUNT: usize = 32;

                    // Set the buffer count to the maximum value.
                    // TODO(b/1518105): This is a hack due to the lack of way of telling a number of
                    // frame buffers explictly in virtio-video v3 RFC. Once we have the way,
                    // set_output_buffer_count should be called with a value passed by the guest.
                    session.set_output_parameters(OUTPUT_BUFFER_COUNT, Format::NV12)?;
                }

                session.use_output_buffer(buffer_id, resource)
            }
        }?;
        Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
            stream_id,
            queue_type: QueueType::Output,
            resource_id,
        }))
    }
774 
get_params( &self, stream_id: StreamId, queue_type: QueueType, is_ext: bool, ) -> VideoResult<VideoCmdResponseType>775     fn get_params(
776         &self,
777         stream_id: StreamId,
778         queue_type: QueueType,
779         is_ext: bool,
780     ) -> VideoResult<VideoCmdResponseType> {
781         let ctx = self.contexts.get(&stream_id)?;
782         let params = match queue_type {
783             QueueType::Input => ctx.in_params.clone(),
784             QueueType::Output => ctx.out_params.clone(),
785         };
786         Ok(VideoCmdResponseType::Sync(CmdResponse::GetParams {
787             queue_type,
788             params,
789             is_ext,
790         }))
791     }
792 
set_params( &mut self, stream_id: StreamId, queue_type: QueueType, params: Params, is_ext: bool, ) -> VideoResult<VideoCmdResponseType>793     fn set_params(
794         &mut self,
795         stream_id: StreamId,
796         queue_type: QueueType,
797         params: Params,
798         is_ext: bool,
799     ) -> VideoResult<VideoCmdResponseType> {
800         let ctx = self.contexts.get_mut(&stream_id)?;
801         match queue_type {
802             QueueType::Input => {
803                 if ctx.session.is_some() {
804                     error!("parameter for input cannot be changed once decoding started");
805                     return Err(VideoError::InvalidParameter);
806                 }
807 
808                 // Only a few parameters can be changed by the guest.
809                 ctx.in_params.format = params.format;
810                 ctx.in_params.plane_formats = params.plane_formats;
811                 // The resource type can only be changed through the SET_PARAMS_EXT command.
812                 if is_ext {
813                     ctx.in_params.resource_type = params.resource_type;
814                 }
815             }
816             QueueType::Output => {
817                 // The guest can only change the resource type of the output queue if no resource
818                 // has been imported yet.
819                 if ctx.out_res.output_params_set {
820                     error!("parameter for output cannot be changed once resources are imported");
821                     return Err(VideoError::InvalidParameter);
822                 }
823                 if is_ext {
824                     ctx.out_params.resource_type = params.resource_type;
825                 }
826             }
827         };
828         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
829     }
830 
query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType>831     fn query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType> {
832         match self.capability.query_control(&ctrl_type) {
833             Some(resp) => Ok(VideoCmdResponseType::Sync(CmdResponse::QueryControl(resp))),
834             None => {
835                 error!("querying an unsupported control: {:?}", ctrl_type);
836                 Err(VideoError::InvalidArgument)
837             }
838         }
839     }
840 
get_control( &self, stream_id: StreamId, ctrl_type: CtrlType, ) -> VideoResult<VideoCmdResponseType>841     fn get_control(
842         &self,
843         stream_id: StreamId,
844         ctrl_type: CtrlType,
845     ) -> VideoResult<VideoCmdResponseType> {
846         let ctx = self.contexts.get(&stream_id)?;
847         match ctrl_type {
848             CtrlType::Profile => {
849                 let profile = match ctx.in_params.format {
850                     Some(Format::VP8) => Profile::VP8Profile0,
851                     Some(Format::VP9) => Profile::VP9Profile0,
852                     Some(Format::H264) => Profile::H264Baseline,
853                     Some(Format::Hevc) => Profile::HevcMain,
854                     Some(f) => {
855                         error!("specified format is invalid: {}", f);
856                         return Err(VideoError::InvalidArgument);
857                     }
858                     None => {
859                         error!("bitstream format is not set");
860                         return Err(VideoError::InvalidArgument);
861                     }
862                 };
863 
864                 Ok(CtrlVal::Profile(profile))
865             }
866             CtrlType::Level => {
867                 let level = match ctx.in_params.format {
868                     Some(Format::H264) => Level::H264_1_0,
869                     Some(f) => {
870                         error!("specified format has no level: {}", f);
871                         return Err(VideoError::InvalidArgument);
872                     }
873                     None => {
874                         error!("bitstream format is not set");
875                         return Err(VideoError::InvalidArgument);
876                     }
877                 };
878 
879                 Ok(CtrlVal::Level(level))
880             }
881             t => {
882                 error!("cannot get a control value: {:?}", t);
883                 Err(VideoError::InvalidArgument)
884             }
885         }
886         .map(|ctrl_val| VideoCmdResponseType::Sync(CmdResponse::GetControl(ctrl_val)))
887     }
888 
drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType>889     fn drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType> {
890         self.contexts
891             .get_mut(&stream_id)?
892             .session
893             .as_mut()
894             .ok_or(VideoError::InvalidOperation)?
895             .flush()?;
896         Ok(VideoCmdResponseType::Async(AsyncCmdTag::Drain {
897             stream_id,
898         }))
899     }
900 
clear_queue( &mut self, stream_id: StreamId, queue_type: QueueType, wait_ctx: &WaitContext<Token>, ) -> VideoResult<VideoCmdResponseType>901     fn clear_queue(
902         &mut self,
903         stream_id: StreamId,
904         queue_type: QueueType,
905         wait_ctx: &WaitContext<Token>,
906     ) -> VideoResult<VideoCmdResponseType> {
907         let ctx = self.contexts.get_mut(&stream_id)?;
908 
909         // TODO(b/153406792): Though QUEUE_CLEAR is defined as a per-queue command in the
910         // specification, the VDA's `Reset()` clears the input buffers and may (or may not) drop
911         // output buffers. So, we call it only for input and resets only the crosvm's internal
912         // context for output.
913         // This code can be a problem when a guest application wants to reset only one queue by
914         // REQBUFS(0). To handle this problem correctly, we need to make libvda expose
915         // DismissPictureBuffer() method.
916         match queue_type {
917             QueueType::Input => {
918                 if let Some(session) = ctx.session.as_mut() {
919                     session.reset()?;
920                     ctx.is_resetting = true;
921                     // Remove all the buffer barriers we are waiting on.
922                     for polled_barrier in ctx.pending_responses.iter_mut().filter_map(|r| {
923                         if let PendingResponse::PollingBufferBarrier(desc) = r {
924                             Some(desc)
925                         } else {
926                             None
927                         }
928                     }) {
929                         wait_ctx.delete(polled_barrier).unwrap_or_else(|e| {
930                             base::warn!(
931                                 "failed to remove buffer barrier from wait context: {:#}",
932                                 e
933                             )
934                         });
935                     }
936                     ctx.pending_responses.clear();
937                     Ok(VideoCmdResponseType::Async(AsyncCmdTag::Clear {
938                         stream_id,
939                         queue_type: QueueType::Input,
940                     }))
941                 } else {
942                     Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
943                 }
944             }
945             QueueType::Output => {
946                 if let Some(session) = ctx.session.as_mut() {
947                     session.clear_output_buffers()?;
948                     ctx.out_res.queued_res_ids.clear();
949                 }
950                 Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
951             }
952         }
953     }
954 }
955 
impl<D: DecoderBackend> Device for Decoder<D> {
    /// Dispatches a single virtio-video command to the matching handler method
    /// and converts any handler error into a synchronous error response.
    ///
    /// Returns the command response, plus an optional `(stream_id, events)`
    /// pair when queueing an output resource produced pending responses that
    /// must be delivered on the event queue.
    fn process_cmd(
        &mut self,
        cmd: VideoCmd,
        wait_ctx: &WaitContext<Token>,
    ) -> (
        VideoCmdResponseType,
        Option<(u32, Vec<VideoEvtResponseType>)>,
    ) {
        use VideoCmd::*;
        use VideoCmdResponseType::Sync;

        let mut event_ret = None;
        let cmd_response = match cmd {
            QueryCapability { queue_type } => Ok(Sync(self.query_capabilities(queue_type))),
            StreamCreate {
                stream_id,
                coded_format,
                input_resource_type,
                output_resource_type,
            } => self.create_stream(
                stream_id,
                coded_format,
                input_resource_type,
                output_resource_type,
            ),
            StreamDestroy { stream_id } => {
                self.destroy_stream(stream_id);
                Ok(Sync(CmdResponse::NoData))
            }
            ResourceCreate {
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                plane_entries,
            } => self.create_resource(
                wait_ctx,
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                plane_entries,
            ),
            ResourceDestroyAll {
                stream_id,
                queue_type,
            } => self.destroy_all_resources(stream_id, queue_type),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Input,
                resource_id,
                timestamp,
                data_sizes,
            } => self.queue_input_resource(stream_id, resource_id, timestamp, data_sizes),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Output,
                resource_id,
                ..
            } => {
                let resp = self.queue_output_resource(stream_id, resource_id);
                // Queueing an output buffer may unblock responses that were
                // waiting for one; collect them so the caller can forward them
                // on the event queue.
                if resp.is_ok() {
                    if let Ok(ctx) = self.contexts.get_mut(&stream_id) {
                        event_ret = Some((stream_id, ctx.output_pending_responses(wait_ctx)));
                    }
                }
                resp
            }
            GetParams {
                stream_id,
                queue_type,
                is_ext,
            } => self.get_params(stream_id, queue_type, is_ext),
            SetParams {
                stream_id,
                queue_type,
                params,
                is_ext,
            } => self.set_params(stream_id, queue_type, params, is_ext),
            QueryControl { query_ctrl_type } => self.query_control(query_ctrl_type),
            GetControl {
                stream_id,
                ctrl_type,
            } => self.get_control(stream_id, ctrl_type),
            // The decoder has no guest-settable controls.
            SetControl { .. } => {
                error!("SET_CONTROL is not allowed for decoder");
                Err(VideoError::InvalidOperation)
            }
            StreamDrain { stream_id } => self.drain_stream(stream_id),
            QueueClear {
                stream_id,
                queue_type,
            } => self.clear_queue(stream_id, queue_type, wait_ctx),
        };

        // Errors are reported to the guest as a synchronous error response
        // rather than propagated to the caller.
        let cmd_ret = match cmd_response {
            Ok(r) => r,
            Err(e) => {
                error!("returning error response: {}", &e);
                Sync(e.into())
            }
        };
        (cmd_ret, event_ret)
    }

    /// Reads one event from the backend session of stream `stream_id` and
    /// translates it into the responses to send to the guest.
    ///
    /// Returns `None` if the context, session, or event cannot be obtained
    /// (the error is logged); otherwise returns the list of event/async
    /// responses, which may be empty.
    fn process_event(
        &mut self,
        desc_map: &mut AsyncCmdDescMap,
        stream_id: u32,
        wait_ctx: &WaitContext<Token>,
    ) -> Option<Vec<VideoEvtResponseType>> {
        // TODO(b/161774071): Switch the return value from Option to VideoResult or another
        // result that would allow us to return an error to the caller.

        use crate::virtio::video::device::VideoEvtResponseType::*;

        let ctx = match self.contexts.get_mut(&stream_id) {
            Ok(ctx) => ctx,
            Err(e) => {
                error!("failed to get a context for session {}: {}", stream_id, e);
                return None;
            }
        };

        let session = match ctx.session.as_mut() {
            Some(s) => s,
            None => {
                error!("session not yet created for context {}", stream_id);
                return None;
            }
        };

        let event = match session.read_event() {
            Ok(event) => event,
            Err(e) => {
                error!("failed to read an event from session {}: {}", stream_id, e);
                return None;
            }
        };

        let event_responses = match event {
            // The backend announced the resolution/buffer requirements of the
            // stream; update the context and notify the guest of the change.
            DecoderEvent::ProvidePictureBuffers {
                min_num_buffers,
                width,
                height,
                visible_rect,
            } => {
                ctx.handle_provide_picture_buffers(min_num_buffers, width, height, visible_rect);
                vec![Event(VideoEvt {
                    typ: EvtType::DecResChanged,
                    stream_id,
                })]
            }
            DecoderEvent::PictureReady {
                picture_buffer_id,
                timestamp,
                ..
            } => {
                // Frames decoded while a reset is in progress are dropped.
                if ctx.is_resetting {
                    vec![]
                } else {
                    // Do we need to wait for processing on the buffer to be completed before
                    // passing it to the guest? If so add a barrier to our pending events.
                    if let Some(desc) = ctx
                        .out_res
                        .frame_buf_id_to_res_id
                        .get(&picture_buffer_id)
                        .and_then(|res_id| ctx.out_res.res_id_to_descriptor.get(res_id))
                    {
                        let desc = Descriptor(desc.as_raw_descriptor());
                        ctx.pending_responses
                            .push_back(PendingResponse::BufferBarrier(desc));
                    }
                    ctx.pending_responses
                        .push_back(PendingResponse::PictureReady {
                            picture_buffer_id,
                            timestamp,
                        });
                    // Emit whatever pending responses are now deliverable.
                    ctx.output_pending_responses(wait_ctx)
                }
            }
            // The backend is done with an input buffer; complete the
            // corresponding async ResourceQueue command.
            DecoderEvent::NotifyEndOfBitstreamBuffer(resource_id) => {
                let async_response = AsyncCmdResponse::from_response(
                    AsyncCmdTag::Queue {
                        stream_id,
                        queue_type: QueueType::Input,
                        resource_id,
                    },
                    CmdResponse::ResourceQueue {
                        timestamp: 0, // ignored for bitstream buffers.
                        flags: 0,     // no flag is raised, as it's returned successfully.
                        size: 0,      // this field is only for encoder
                    },
                );
                vec![AsyncCmd(async_response)]
            }
            // A flush initiated by drain_stream() has finished; queue the
            // completion behind any still-pending picture responses so
            // ordering is preserved.
            DecoderEvent::FlushCompleted(flush_result) => {
                match flush_result {
                    Ok(()) => {
                        ctx.pending_responses
                            .push_back(PendingResponse::FlushCompleted);
                        ctx.output_pending_responses(wait_ctx)
                    }
                    Err(error) => {
                        // TODO(b/151810591): If `resp` is `libvda::decode::Response::Canceled`,
                        // we should notify it to the driver in some way.
                        error!(
                            "failed to 'Flush' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(
                            AsyncCmdTag::Drain { stream_id },
                            error,
                        ))]
                    }
                }
            }
            // A reset initiated by clear_queue() has finished; leave the
            // resetting state and complete the async Clear command.
            DecoderEvent::ResetCompleted(reset_result) => {
                ctx.is_resetting = false;
                let tag = AsyncCmdTag::Clear {
                    stream_id,
                    queue_type: QueueType::Input,
                };
                match reset_result {
                    Ok(()) => {
                        // Cancel every still-pending input-queue request (other
                        // than the Clear itself) before reporting completion.
                        let mut responses: Vec<_> = desc_map
                            .create_cancellation_responses(
                                &stream_id,
                                Some(QueueType::Input),
                                Some(tag),
                            )
                            .into_iter()
                            .map(AsyncCmd)
                            .collect();
                        responses.push(AsyncCmd(AsyncCmdResponse::from_response(
                            tag,
                            CmdResponse::NoData,
                        )));
                        responses
                    }
                    Err(error) => {
                        error!(
                            "failed to 'Reset' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(tag, error))]
                    }
                }
            }
            // Backend-reported failure; forward it to the guest as an error
            // event on the stream.
            DecoderEvent::NotifyError(error) => {
                error!("an error is notified by VDA: {}", error);
                vec![Event(VideoEvt {
                    typ: EvtType::Error,
                    stream_id,
                })]
            }
        };

        Some(event_responses)
    }

    /// Called when a buffer barrier descriptor being polled becomes signaled:
    /// removes the barrier at the head of the pending-response queue and emits
    /// any responses that are now deliverable.
    ///
    /// Returns `None` only when the stream context cannot be found.
    fn process_buffer_barrier(
        &mut self,
        stream_id: u32,
        wait_ctx: &WaitContext<Token>,
    ) -> Option<Vec<VideoEvtResponseType>> {
        let ctx = match self.contexts.get_mut(&stream_id) {
            Ok(ctx) => ctx,
            Err(e) => {
                error!("failed to get a context for session {}: {}", stream_id, e);
                return None;
            }
        };

        match ctx.pending_responses.front() {
            Some(PendingResponse::PollingBufferBarrier(desc)) => {
                // `delete` can return an error if the descriptor has been closed by e.g. the GPU
                // driver. We can safely ignore these.
                let _ = wait_ctx.delete(&Descriptor(desc.as_raw_descriptor()));
                ctx.pending_responses.pop_front();
            }
            _ => {
                error!("expected a buffer barrier, but found none");
            }
        }

        Some(ctx.output_pending_responses(wait_ctx))
    }
}
1246