• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // Copyright 2020 The Chromium OS Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 //! Implementation of a virtio video decoder backed by a device.
6 
7 use std::collections::btree_map::Entry;
8 use std::collections::{BTreeMap, BTreeSet, VecDeque};
9 use std::convert::TryInto;
10 
11 use backend::*;
12 use base::{error, IntoRawDescriptor, Tube, WaitContext};
13 
14 use crate::virtio::resource_bridge::{self, BufferInfo, ResourceInfo, ResourceRequest};
15 use crate::virtio::video::async_cmd_desc_map::AsyncCmdDescMap;
16 use crate::virtio::video::command::{QueueType, VideoCmd};
17 use crate::virtio::video::control::{CtrlType, CtrlVal, QueryCtrlType};
18 use crate::virtio::video::device::*;
19 use crate::virtio::video::error::*;
20 use crate::virtio::video::event::*;
21 use crate::virtio::video::format::*;
22 use crate::virtio::video::params::Params;
23 use crate::virtio::video::protocol;
24 use crate::virtio::video::response::CmdResponse;
25 
26 mod backend;
27 mod capability;
28 
29 use capability::*;
30 
// Identifier of one decoding stream (one virtio-video context).
type StreamId = u32;
// Generic identifier of a buffer resource.
type ResourceId = u32;

// ResourceId given by the driver for input (bitstream) buffers.
type InputResourceId = u32;
// ResourceId given by the driver for output (frame) buffers.
type OutputResourceId = u32;

// Id for a frame buffer passed to Chrome.
// We cannot use OutputResourceId as is because this ID must be between 0 and ((# of buffers) - 1).
//
// TODO(b/1518105): Once we decide to generate resource_id in the device side,
// we don't need this value and can pass OutputResourceId to Chrome directly.
type FrameBufferId = i32;

// Handle used to refer to a buffer over the resource bridge.
type ResourceHandle = u32;
// Timestamp in nanoseconds, as passed by the virtio-video driver.
type Timestamp = u64;
47 
// The result of OutputResources.queue_resource().
enum QueueOutputResourceResult {
    // The resource is kept back as the EOS buffer and must not be sent to the host.
    UsingAsEos,
    // The resource has been registered before; its frame buffer id is reused.
    Reused(FrameBufferId),
    // The resource is queued for the first time and was assigned a new frame buffer id.
    Registered(FrameBufferId),
}
54 
// Bookkeeping for the input (bitstream) queue of one stream.
#[derive(Default)]
struct InputResources {
    // Timestamp (ns, as queued by the guest) -> InputResourceId
    timestamp_to_res_id: BTreeMap<Timestamp, InputResourceId>,

    // InputResourceId -> ResourceHandle used over the resource bridge.
    res_id_to_res_handle: BTreeMap<InputResourceId, ResourceHandle>,

    // InputResourceId -> data offset of the buffer's first plane.
    res_id_to_offset: BTreeMap<InputResourceId, u32>,
}
66 
// Bookkeeping for the output (frame) queue of one stream.
#[derive(Default)]
struct OutputResources {
    // OutputResourceId <-> FrameBufferId, kept in both directions so either id
    // can be resolved in O(log n).
    res_id_to_frame_buf_id: BTreeMap<OutputResourceId, FrameBufferId>,
    frame_buf_id_to_res_id: BTreeMap<FrameBufferId, OutputResourceId>,

    // Store the resource id of the queued output buffers.
    queued_res_ids: BTreeSet<OutputResourceId>,

    // Reserves output resource ID that will be used to notify EOS.
    // If a guest enqueues a resource with this ID, the resource must not be sent to the host.
    // Once the value is set, it won't be changed until resolution is changed or a stream is
    // destroyed.
    eos_resource_id: Option<OutputResourceId>,

    // This is a flag that shows whether the device's set_output_buffer_count is called.
    // This will be set to true when ResourceCreate for OutputBuffer is called for the first time.
    //
    // TODO(b/1518105): This field is added as a hack because the current virtio-video v3 spec
    // doesn't have a way to send a number of frame buffers the guest provides.
    // Once we have the way in the virtio-video protocol, we should remove this flag.
    is_output_buffer_count_set: bool,

    // OutputResourceId -> ResourceHandle used over the resource bridge.
    res_id_to_res_handle: BTreeMap<OutputResourceId, ResourceHandle>,
}
93 
94 impl OutputResources {
queue_resource( &mut self, resource_id: OutputResourceId, ) -> VideoResult<QueueOutputResourceResult>95     fn queue_resource(
96         &mut self,
97         resource_id: OutputResourceId,
98     ) -> VideoResult<QueueOutputResourceResult> {
99         if !self.queued_res_ids.insert(resource_id) {
100             error!("resource_id {} is already queued", resource_id);
101             return Err(VideoError::InvalidParameter);
102         }
103 
104         // Stores an output buffer to notify EOS.
105         // This is necessary because libvda is unable to indicate EOS along with returned buffers.
106         // For now, when a `Flush()` completes, this saved resource will be returned as a zero-sized
107         // buffer with the EOS flag.
108         // TODO(b/149725148): Remove this when libvda supports buffer flags.
109         if *self.eos_resource_id.get_or_insert(resource_id) == resource_id {
110             return Ok(QueueOutputResourceResult::UsingAsEos);
111         }
112 
113         Ok(match self.res_id_to_frame_buf_id.entry(resource_id) {
114             Entry::Occupied(e) => QueueOutputResourceResult::Reused(*e.get()),
115             Entry::Vacant(_) => {
116                 let buffer_id = self.res_id_to_frame_buf_id.len() as FrameBufferId;
117                 self.res_id_to_frame_buf_id.insert(resource_id, buffer_id);
118                 self.frame_buf_id_to_res_id.insert(buffer_id, resource_id);
119                 QueueOutputResourceResult::Registered(buffer_id)
120             }
121         })
122     }
123 
dequeue_frame_buffer( &mut self, buffer_id: FrameBufferId, stream_id: StreamId, ) -> Option<ResourceId>124     fn dequeue_frame_buffer(
125         &mut self,
126         buffer_id: FrameBufferId,
127         stream_id: StreamId,
128     ) -> Option<ResourceId> {
129         let resource_id = match self.frame_buf_id_to_res_id.get(&buffer_id) {
130             Some(id) => *id,
131             None => {
132                 error!(
133                     "unknown frame buffer id {} for stream {}",
134                     buffer_id, stream_id
135                 );
136                 return None;
137             }
138         };
139 
140         self.queued_res_ids.take(&resource_id).or_else(|| {
141             error!(
142                 "resource_id {} is not enqueued for stream {}",
143                 resource_id, stream_id
144             );
145             None
146         })
147     }
148 
dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId>149     fn dequeue_eos_resource_id(&mut self) -> Option<OutputResourceId> {
150         self.queued_res_ids.take(&self.eos_resource_id?)
151     }
152 
set_output_buffer_count(&mut self) -> bool153     fn set_output_buffer_count(&mut self) -> bool {
154         if !self.is_output_buffer_count_set {
155             self.is_output_buffer_count_set = true;
156             return true;
157         }
158         false
159     }
160 }
161 
// A picture-ready notification from the decoder, buffered until the matching
// output resource can be returned to the guest.
struct PictureReadyEvent {
    // Id of the frame buffer that now holds a decoded picture.
    picture_buffer_id: i32,
    // Bitstream id of the decoded input (the per-second timestamp the guest queued).
    bitstream_id: i32,
    // Visible rectangle of the decoded picture.
    visible_rect: Rect,
}
167 
// Context is associated with one `DecoderSession`, which corresponds to one stream from the
// virtio-video's point of view.
#[derive(Default)]
struct Context<S: DecoderSession> {
    // Id of the stream this context belongs to.
    stream_id: StreamId,

    // Parameters of the input (bitstream) queue.
    in_params: Params,
    // Parameters of the output (frame) queue.
    out_params: Params,

    // Resource bookkeeping for the input queue.
    in_res: InputResources,
    // Resource bookkeeping for the output queue.
    out_res: OutputResources,

    // Set the flag when we ask the decoder reset, and unset when the reset is done.
    is_resetting: bool,

    // Picture-ready events waiting to be turned into responses for the guest.
    pending_ready_pictures: VecDeque<PictureReadyEvent>,

    // Decoder session; created lazily at the first `ResourceCreate`, `None` until then.
    session: Option<S>,
}
187 
188 impl<S: DecoderSession> Context<S> {
new(stream_id: StreamId, format: Format) -> Self189     fn new(stream_id: StreamId, format: Format) -> Self {
190         Context {
191             stream_id,
192             in_params: Params {
193                 format: Some(format),
194                 min_buffers: 1,
195                 max_buffers: 32,
196                 plane_formats: vec![Default::default()],
197                 ..Default::default()
198             },
199             out_params: Default::default(),
200             in_res: Default::default(),
201             out_res: Default::default(),
202             is_resetting: false,
203             pending_ready_pictures: Default::default(),
204             session: None,
205         }
206     }
207 
output_pending_pictures(&mut self) -> Vec<VideoEvtResponseType>208     fn output_pending_pictures(&mut self) -> Vec<VideoEvtResponseType> {
209         let mut responses = vec![];
210         while let Some(async_response) = self.output_pending_picture() {
211             responses.push(VideoEvtResponseType::AsyncCmd(async_response));
212         }
213         responses
214     }
215 
output_pending_picture(&mut self) -> Option<AsyncCmdResponse>216     fn output_pending_picture(&mut self) -> Option<AsyncCmdResponse> {
217         let response = {
218             let PictureReadyEvent {
219                 picture_buffer_id,
220                 bitstream_id,
221                 visible_rect,
222             } = self.pending_ready_pictures.front()?;
223 
224             let plane_size = ((visible_rect.right - visible_rect.left)
225                 * (visible_rect.bottom - visible_rect.top)) as u32;
226             for fmt in self.out_params.plane_formats.iter_mut() {
227                 fmt.plane_size = plane_size;
228                 // We don't need to set `plane_formats[i].stride` for the decoder.
229             }
230 
231             let resource_id = self
232                 .out_res
233                 .dequeue_frame_buffer(*picture_buffer_id, self.stream_id)?;
234 
235             AsyncCmdResponse::from_response(
236                 AsyncCmdTag::Queue {
237                     stream_id: self.stream_id,
238                     queue_type: QueueType::Output,
239                     resource_id,
240                 },
241                 CmdResponse::ResourceQueue {
242                     // Conversion from sec to nsec.
243                     timestamp: (*bitstream_id as u64) * 1_000_000_000,
244                     // TODO(b/149725148): Set buffer flags once libvda exposes them.
245                     flags: 0,
246                     // `size` is only used for the encoder.
247                     size: 0,
248                 },
249             )
250         };
251         self.pending_ready_pictures.pop_front().unwrap();
252 
253         Some(response)
254     }
255 
get_resource_info( &self, queue_type: QueueType, res_bridge: &Tube, resource_id: u32, ) -> VideoResult<BufferInfo>256     fn get_resource_info(
257         &self,
258         queue_type: QueueType,
259         res_bridge: &Tube,
260         resource_id: u32,
261     ) -> VideoResult<BufferInfo> {
262         let res_id_to_res_handle = match queue_type {
263             QueueType::Input => &self.in_res.res_id_to_res_handle,
264             QueueType::Output => &self.out_res.res_id_to_res_handle,
265         };
266 
267         let handle = res_id_to_res_handle.get(&resource_id).copied().ok_or(
268             VideoError::InvalidResourceId {
269                 stream_id: self.stream_id,
270                 resource_id,
271             },
272         )?;
273         match resource_bridge::get_resource_info(
274             res_bridge,
275             ResourceRequest::GetBuffer { id: handle },
276         ) {
277             Ok(ResourceInfo::Buffer(buffer_info)) => Ok(buffer_info),
278             Ok(_) => Err(VideoError::InvalidArgument),
279             Err(e) => Err(VideoError::ResourceBridgeFailure(e)),
280         }
281     }
282 
register_buffer(&mut self, queue_type: QueueType, resource_id: u32, uuid: &u128)283     fn register_buffer(&mut self, queue_type: QueueType, resource_id: u32, uuid: &u128) {
284         // TODO(stevensd): `Virtio3DBackend::resource_assign_uuid` is currently implemented to use
285         // 32-bits resource_handles as UUIDs. Once it starts using real UUIDs, we need to update
286         // this conversion.
287         let handle = TryInto::<u32>::try_into(*uuid).expect("uuid is larger than 32 bits");
288         let res_id_to_res_handle = match queue_type {
289             QueueType::Input => &mut self.in_res.res_id_to_res_handle,
290             QueueType::Output => &mut self.out_res.res_id_to_res_handle,
291         };
292         res_id_to_res_handle.insert(resource_id, handle);
293     }
294 
295     /*
296      * Functions handling decoder events.
297      */
298 
handle_provide_picture_buffers( &mut self, min_num_buffers: u32, width: i32, height: i32, visible_rect: Rect, )299     fn handle_provide_picture_buffers(
300         &mut self,
301         min_num_buffers: u32,
302         width: i32,
303         height: i32,
304         visible_rect: Rect,
305     ) {
306         // We only support NV12.
307         let format = Some(Format::NV12);
308 
309         let rect_width: u32 = (visible_rect.right - visible_rect.left) as u32;
310         let rect_height: u32 = (visible_rect.bottom - visible_rect.top) as u32;
311 
312         let plane_size = rect_width * rect_height;
313         let stride = rect_width;
314         let plane_formats = vec![
315             PlaneFormat { plane_size, stride },
316             PlaneFormat { plane_size, stride },
317         ];
318 
319         self.out_params = Params {
320             format,
321             // Note that rect_width is sometimes smaller.
322             frame_width: width as u32,
323             frame_height: height as u32,
324             // Adding 1 to `min_buffers` to reserve a resource for `eos_resource_id`.
325             min_buffers: min_num_buffers + 1,
326             max_buffers: 32,
327             crop: Crop {
328                 left: visible_rect.left as u32,
329                 top: visible_rect.top as u32,
330                 width: rect_width,
331                 height: rect_height,
332             },
333             plane_formats,
334             // No need to set `frame_rate`, as it's only for the encoder.
335             ..Default::default()
336         };
337     }
338 
handle_notify_end_of_bitstream_buffer(&mut self, bitstream_id: i32) -> Option<ResourceId>339     fn handle_notify_end_of_bitstream_buffer(&mut self, bitstream_id: i32) -> Option<ResourceId> {
340         // `bitstream_id` in libvda is a timestamp passed via RESOURCE_QUEUE for the input buffer
341         // in second.
342         let timestamp: u64 = (bitstream_id as u64) * 1_000_000_000;
343         self.in_res
344             .timestamp_to_res_id
345             .remove(&(timestamp as u64))
346             .or_else(|| {
347                 error!("failed to remove a timestamp {}", timestamp);
348                 None
349             })
350     }
351 }
352 
/// A thin wrapper of a map of contexts with error handlings.
struct ContextMap<S: DecoderSession> {
    // StreamId -> Context for that stream.
    map: BTreeMap<StreamId, Context<S>>,
}
357 
358 impl<S: DecoderSession> ContextMap<S> {
new() -> Self359     fn new() -> Self {
360         ContextMap {
361             map: Default::default(),
362         }
363     }
364 
insert(&mut self, ctx: Context<S>) -> VideoResult<()>365     fn insert(&mut self, ctx: Context<S>) -> VideoResult<()> {
366         match self.map.entry(ctx.stream_id) {
367             Entry::Vacant(e) => {
368                 e.insert(ctx);
369                 Ok(())
370             }
371             Entry::Occupied(_) => {
372                 error!("session {} already exists", ctx.stream_id);
373                 Err(VideoError::InvalidStreamId(ctx.stream_id))
374             }
375         }
376     }
377 
get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>>378     fn get(&self, stream_id: &StreamId) -> VideoResult<&Context<S>> {
379         self.map.get(stream_id).ok_or_else(|| {
380             error!("failed to get context of stream {}", *stream_id);
381             VideoError::InvalidStreamId(*stream_id)
382         })
383     }
384 
get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>>385     fn get_mut(&mut self, stream_id: &StreamId) -> VideoResult<&mut Context<S>> {
386         self.map.get_mut(stream_id).ok_or_else(|| {
387             error!("failed to get context of stream {}", *stream_id);
388             VideoError::InvalidStreamId(*stream_id)
389         })
390     }
391 }
392 
/// Represents information of a decoder backed by a `DecoderBackend`.
pub struct Decoder<D: DecoderBackend> {
    // Backend used to create per-stream decoder sessions.
    decoder: D,
    // Input/output format capabilities advertised to the guest.
    capability: Capability,
    // Per-stream contexts, keyed by stream id.
    contexts: ContextMap<D::Session>,
}
399 
400 impl<'a, D: DecoderBackend> Decoder<D> {
401     /*
402      * Functions processing virtio-video commands.
403      */
404 
query_capabilities(&self, queue_type: QueueType) -> CmdResponse405     fn query_capabilities(&self, queue_type: QueueType) -> CmdResponse {
406         let descs = match queue_type {
407             QueueType::Input => self.capability.in_fmts.clone(),
408             QueueType::Output => self.capability.out_fmts.clone(),
409         };
410 
411         CmdResponse::QueryCapability(descs)
412     }
413 
create_stream( &mut self, stream_id: StreamId, coded_format: Format, ) -> VideoResult<VideoCmdResponseType>414     fn create_stream(
415         &mut self,
416         stream_id: StreamId,
417         coded_format: Format,
418     ) -> VideoResult<VideoCmdResponseType> {
419         // Create an instance of `Context`.
420         // Note that the `DecoderSession` will be created not here but at the first call of
421         // `ResourceCreate`. This is because we need to fix a coded format for it, which
422         // will be set by `SetParams`.
423         self.contexts
424             .insert(Context::new(stream_id, coded_format))?;
425         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
426     }
427 
destroy_stream(&mut self, stream_id: StreamId)428     fn destroy_stream(&mut self, stream_id: StreamId) {
429         if self.contexts.map.remove(&stream_id).is_none() {
430             error!("Tried to destroy an invalid stream context {}", stream_id);
431         }
432     }
433 
create_session( decoder: &D, wait_ctx: &WaitContext<Token>, ctx: &Context<D::Session>, stream_id: StreamId, ) -> VideoResult<D::Session>434     fn create_session(
435         decoder: &D,
436         wait_ctx: &WaitContext<Token>,
437         ctx: &Context<D::Session>,
438         stream_id: StreamId,
439     ) -> VideoResult<D::Session> {
440         let format = match ctx.in_params.format {
441             Some(f) => f,
442             None => {
443                 error!("bitstream format is not specified");
444                 return Err(VideoError::InvalidParameter);
445             }
446         };
447 
448         let session = decoder.new_session(format)?;
449 
450         wait_ctx
451             .add(session.event_pipe(), Token::Event { id: stream_id })
452             .map_err(|e| {
453                 error!(
454                     "failed to add FD to poll context for session {}: {}",
455                     stream_id, e
456                 );
457                 VideoError::InvalidOperation
458             })?;
459 
460         Ok(session)
461     }
462 
    /// Registers a newly created resource with a stream.
    ///
    /// The `DecoderSession` is created lazily here on the first resource, since
    /// only then is the coded format (set via `SetParams`) fixed. For input
    /// resources the first plane offset is recorded for later `decode()` calls;
    /// for output resources, ids already imported to Chrome are rejected.
    fn create_resource(
        &mut self,
        wait_ctx: &WaitContext<Token>,
        stream_id: StreamId,
        queue_type: QueueType,
        resource_id: ResourceId,
        plane_offsets: Vec<u32>,
        uuid: u128,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;

        // Create a instance of `DecoderSession` at the first time `ResourceCreate` is
        // called here.
        if ctx.session.is_none() {
            ctx.session = Some(Self::create_session(
                &self.decoder,
                wait_ctx,
                ctx,
                stream_id,
            )?);
        }

        ctx.register_buffer(queue_type, resource_id, &uuid);

        if queue_type == QueueType::Input {
            // Remember the data offset of the input buffer (0 when no offset was given).
            ctx.in_res
                .res_id_to_offset
                .insert(resource_id, plane_offsets.get(0).copied().unwrap_or(0));
            return Ok(VideoCmdResponseType::Sync(CmdResponse::NoData));
        };

        // We assume ResourceCreate is not called to an output resource that is already
        // imported to Chrome for now.
        // TODO(keiichiw): We need to support this case for a guest client who may use
        // arbitrary numbers of buffers. (e.g. C2V4L2Component in ARCVM)
        // Such a client is valid as long as it uses at most 32 buffers at the same time.
        if let Some(frame_buf_id) = ctx.out_res.res_id_to_frame_buf_id.get(&resource_id) {
            error!(
                "resource {} has already been imported to Chrome as a frame buffer {}",
                resource_id, frame_buf_id
            );
            return Err(VideoError::InvalidOperation);
        }

        Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
    }
509 
destroy_all_resources( &mut self, stream_id: StreamId, queue_type: QueueType, ) -> VideoResult<VideoCmdResponseType>510     fn destroy_all_resources(
511         &mut self,
512         stream_id: StreamId,
513         queue_type: QueueType,
514     ) -> VideoResult<VideoCmdResponseType> {
515         let ctx = self.contexts.get_mut(&stream_id)?;
516 
517         // Reset the associated context.
518         match queue_type {
519             QueueType::Input => {
520                 ctx.in_res = Default::default();
521             }
522             QueueType::Output => {
523                 ctx.out_res = Default::default();
524             }
525         }
526         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
527     }
528 
    /// Queues an input (bitstream) resource to the decoder session.
    ///
    /// `timestamp` is in nanoseconds as sent by the virtio-video driver; it is
    /// converted to seconds because the backend expects per-second values (in
    /// practice a frame number — see the comment below). Returns an async
    /// `Queue` tag; the actual response comes when the backend finishes.
    fn queue_input_resource(
        &mut self,
        resource_bridge: &Tube,
        stream_id: StreamId,
        resource_id: ResourceId,
        timestamp: u64,
        data_sizes: Vec<u32>,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;
        let session = ctx.session.as_ref().ok_or(VideoError::InvalidOperation)?;

        // Only single-planar bitstream buffers are supported.
        if data_sizes.len() != 1 {
            error!("num_data_sizes must be 1 but {}", data_sizes.len());
            return Err(VideoError::InvalidOperation);
        }

        // Take an ownership of this file by `into_raw_descriptor()` as this file will be closed
        // by the `DecoderBackend`.
        let fd = ctx
            .get_resource_info(QueueType::Input, resource_bridge, resource_id)?
            .file
            .into_raw_descriptor();

        // Register a mapping of timestamp to resource_id
        if let Some(old_resource_id) = ctx
            .in_res
            .timestamp_to_res_id
            .insert(timestamp, resource_id)
        {
            error!(
                "Mapping from timestamp {} to resource_id ({} => {}) exists!",
                timestamp, old_resource_id, resource_id
            );
        }

        // Fall back to offset 0 when the resource was created without one.
        let offset = match ctx.in_res.res_id_to_offset.get(&resource_id) {
            Some(offset) => *offset,
            None => {
                error!("Failed to find offset for {}", resource_id);
                0
            }
        };

        // While the virtio-video driver handles timestamps as nanoseconds,
        // Chrome assumes per-second timestamps coming. So, we need a conversion from nsec
        // to sec.
        // Note that this value should not be an unix time stamp but a frame number that
        // a guest passes to a driver as a 32-bit integer in our implementation.
        // So, overflow must not happen in this conversion.
        let ts_sec: i32 = (timestamp / 1_000_000_000) as i32;
        session.decode(
            ts_sec,
            fd,
            offset,
            data_sizes[0], // bytes_used
        )?;

        Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
            stream_id,
            queue_type: QueueType::Input,
            resource_id,
        }))
    }
592 
    /// Queues an output (frame) resource.
    ///
    /// Depending on `queue_resource`'s verdict, the resource is either held
    /// back as the EOS buffer, re-queued to the session by its existing frame
    /// buffer id, or imported into the session as a new NV12 frame buffer. In
    /// every case an async `Queue` tag is returned to the guest.
    fn queue_output_resource(
        &mut self,
        resource_bridge: &Tube,
        stream_id: StreamId,
        resource_id: ResourceId,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get_mut(&stream_id)?;
        let session = ctx.session.as_ref().ok_or(VideoError::InvalidOperation)?;

        // Check if the current pixel format is set to NV12.
        match ctx.out_params.format {
            Some(Format::NV12) => (), // OK
            Some(f) => {
                error!(
                    "video decoder only supports NV12 as a frame format, got {}",
                    f
                );
                return Err(VideoError::InvalidOperation);
            }
            None => {
                error!("output format is not set");
                return Err(VideoError::InvalidOperation);
            }
        };

        match ctx.out_res.queue_resource(resource_id)? {
            QueueOutputResourceResult::UsingAsEos => {
                // Don't enqueue this resource to the host.
                Ok(())
            }
            QueueOutputResourceResult::Reused(buffer_id) => session.reuse_output_buffer(buffer_id),
            QueueOutputResourceResult::Registered(buffer_id) => {
                let resource_info =
                    ctx.get_resource_info(QueueType::Output, resource_bridge, resource_id)?;
                // Two planes for NV12: Y, then interleaved UV.
                let planes = vec![
                    FramePlane {
                        offset: resource_info.planes[0].offset as i32,
                        stride: resource_info.planes[0].stride as i32,
                    },
                    FramePlane {
                        offset: resource_info.planes[1].offset as i32,
                        stride: resource_info.planes[1].stride as i32,
                    },
                ];

                // Set output_buffer_count before passing the first output buffer.
                if ctx.out_res.set_output_buffer_count() {
                    const OUTPUT_BUFFER_COUNT: usize = 32;

                    // Set the buffer count to the maximum value.
                    // TODO(b/1518105): This is a hack due to the lack of way of telling a number of
                    // frame buffers explictly in virtio-video v3 RFC. Once we have the way,
                    // set_output_buffer_count should be called with a value passed by the guest.
                    session.set_output_buffer_count(OUTPUT_BUFFER_COUNT)?;
                }

                // Take ownership of this file by `into_raw_descriptor()` as this
                // file will be closed by libvda.
                let fd = resource_info.file.into_raw_descriptor();
                session.use_output_buffer(
                    buffer_id as i32,
                    Format::NV12,
                    fd,
                    &planes,
                    resource_info.modifier,
                )
            }
        }?;
        Ok(VideoCmdResponseType::Async(AsyncCmdTag::Queue {
            stream_id,
            queue_type: QueueType::Output,
            resource_id,
        }))
    }
667 
get_params( &self, stream_id: StreamId, queue_type: QueueType, ) -> VideoResult<VideoCmdResponseType>668     fn get_params(
669         &self,
670         stream_id: StreamId,
671         queue_type: QueueType,
672     ) -> VideoResult<VideoCmdResponseType> {
673         let ctx = self.contexts.get(&stream_id)?;
674         let params = match queue_type {
675             QueueType::Input => ctx.in_params.clone(),
676             QueueType::Output => ctx.out_params.clone(),
677         };
678         Ok(VideoCmdResponseType::Sync(CmdResponse::GetParams {
679             queue_type,
680             params,
681         }))
682     }
683 
set_params( &mut self, stream_id: StreamId, queue_type: QueueType, params: Params, ) -> VideoResult<VideoCmdResponseType>684     fn set_params(
685         &mut self,
686         stream_id: StreamId,
687         queue_type: QueueType,
688         params: Params,
689     ) -> VideoResult<VideoCmdResponseType> {
690         let ctx = self.contexts.get_mut(&stream_id)?;
691         match queue_type {
692             QueueType::Input => {
693                 if ctx.session.is_some() {
694                     error!("parameter for input cannot be changed once decoding started");
695                     return Err(VideoError::InvalidParameter);
696                 }
697 
698                 // Only a few parameters can be changed by the guest.
699                 ctx.in_params.format = params.format;
700                 ctx.in_params.plane_formats = params.plane_formats;
701             }
702             QueueType::Output => {
703                 // The guest cannot update parameters for output queue in the decoder.
704             }
705         };
706         Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
707     }
708 
query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType>709     fn query_control(&self, ctrl_type: QueryCtrlType) -> VideoResult<VideoCmdResponseType> {
710         match self.capability.query_control(&ctrl_type) {
711             Some(resp) => Ok(VideoCmdResponseType::Sync(CmdResponse::QueryControl(resp))),
712             None => {
713                 error!("querying an unsupported control: {:?}", ctrl_type);
714                 Err(VideoError::InvalidArgument)
715             }
716         }
717     }
718 
    /// Returns the value of a per-stream control.
    ///
    /// Only `Profile` and `Level` are readable; both are derived from the
    /// stream's bitstream format (fixed value per codec) rather than from any
    /// stored per-stream state.
    fn get_control(
        &self,
        stream_id: StreamId,
        ctrl_type: CtrlType,
    ) -> VideoResult<VideoCmdResponseType> {
        let ctx = self.contexts.get(&stream_id)?;
        match ctrl_type {
            CtrlType::Profile => {
                // Report a fixed profile for each supported codec.
                let profile = match ctx.in_params.format {
                    Some(Format::VP8) => Profile::VP8Profile0,
                    Some(Format::VP9) => Profile::VP9Profile0,
                    Some(Format::H264) => Profile::H264Baseline,
                    Some(f) => {
                        error!("specified format is invalid: {}", f);
                        return Err(VideoError::InvalidArgument);
                    }
                    None => {
                        error!("bitstream format is not set");
                        return Err(VideoError::InvalidArgument);
                    }
                };

                Ok(CtrlVal::Profile(profile))
            }
            CtrlType::Level => {
                // Only H.264 has a level control here; report a fixed value.
                let level = match ctx.in_params.format {
                    Some(Format::H264) => Level::H264_1_0,
                    Some(f) => {
                        error!("specified format has no level: {}", f);
                        return Err(VideoError::InvalidArgument);
                    }
                    None => {
                        error!("bitstream format is not set");
                        return Err(VideoError::InvalidArgument);
                    }
                };

                Ok(CtrlVal::Level(level))
            }
            t => {
                error!("cannot get a control value: {:?}", t);
                Err(VideoError::InvalidArgument)
            }
        }
        .map(|ctrl_val| VideoCmdResponseType::Sync(CmdResponse::GetControl(ctrl_val)))
    }
765 
drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType>766     fn drain_stream(&mut self, stream_id: StreamId) -> VideoResult<VideoCmdResponseType> {
767         self.contexts
768             .get(&stream_id)?
769             .session
770             .as_ref()
771             .ok_or(VideoError::InvalidOperation)?
772             .flush()?;
773         Ok(VideoCmdResponseType::Async(AsyncCmdTag::Drain {
774             stream_id,
775         }))
776     }
777 
clear_queue( &mut self, stream_id: StreamId, queue_type: QueueType, ) -> VideoResult<VideoCmdResponseType>778     fn clear_queue(
779         &mut self,
780         stream_id: StreamId,
781         queue_type: QueueType,
782     ) -> VideoResult<VideoCmdResponseType> {
783         let ctx = self.contexts.get_mut(&stream_id)?;
784         let session = ctx.session.as_ref().ok_or(VideoError::InvalidOperation)?;
785 
786         // TODO(b/153406792): Though QUEUE_CLEAR is defined as a per-queue command in the
787         // specification, the VDA's `Reset()` clears the input buffers and may (or may not) drop
788         // output buffers. So, we call it only for input and resets only the crosvm's internal
789         // context for output.
790         // This code can be a problem when a guest application wants to reset only one queue by
791         // REQBUFS(0). To handle this problem correctly, we need to make libvda expose
792         // DismissPictureBuffer() method.
793         match queue_type {
794             QueueType::Input => {
795                 session.reset()?;
796                 ctx.is_resetting = true;
797                 ctx.pending_ready_pictures.clear();
798                 Ok(VideoCmdResponseType::Async(AsyncCmdTag::Clear {
799                     stream_id,
800                     queue_type: QueueType::Input,
801                 }))
802             }
803             QueueType::Output => {
804                 ctx.out_res.queued_res_ids.clear();
805                 Ok(VideoCmdResponseType::Sync(CmdResponse::NoData))
806             }
807         }
808     }
809 }
810 
impl<D: DecoderBackend> Device for Decoder<D> {
    /// Dispatches a single virtio-video command from the driver to the
    /// matching `Decoder` method.
    ///
    /// Returns the command response (synchronous or asynchronous) plus an
    /// optional `(stream_id, events)` pair for event responses that became
    /// deliverable as a side effect of the command — currently only pending
    /// pictures released by queueing an output resource.
    fn process_cmd(
        &mut self,
        cmd: VideoCmd,
        wait_ctx: &WaitContext<Token>,
        resource_bridge: &Tube,
    ) -> (
        VideoCmdResponseType,
        Option<(u32, Vec<VideoEvtResponseType>)>,
    ) {
        use VideoCmd::*;
        use VideoCmdResponseType::Sync;

        let mut event_ret = None;
        let cmd_response = match cmd {
            QueryCapability { queue_type } => Ok(Sync(self.query_capabilities(queue_type))),
            StreamCreate {
                stream_id,
                coded_format,
            } => self.create_stream(stream_id, coded_format),
            StreamDestroy { stream_id } => {
                self.destroy_stream(stream_id);
                Ok(Sync(CmdResponse::NoData))
            }
            ResourceCreate {
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                uuid,
            } => self.create_resource(
                wait_ctx,
                stream_id,
                queue_type,
                resource_id,
                plane_offsets,
                uuid,
            ),
            ResourceDestroyAll {
                stream_id,
                queue_type,
            } => self.destroy_all_resources(stream_id, queue_type),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Input,
                resource_id,
                timestamp,
                data_sizes,
            } => self.queue_input_resource(
                resource_bridge,
                stream_id,
                resource_id,
                timestamp,
                data_sizes,
            ),
            ResourceQueue {
                stream_id,
                queue_type: QueueType::Output,
                resource_id,
                ..
            } => {
                let resp = self.queue_output_resource(resource_bridge, stream_id, resource_id);
                // A newly queued output buffer may allow buffered ready
                // pictures to be delivered, so collect them as event responses.
                if resp.is_ok() {
                    if let Ok(ctx) = self.contexts.get_mut(&stream_id) {
                        event_ret = Some((stream_id, ctx.output_pending_pictures()));
                    }
                }
                resp
            }
            GetParams {
                stream_id,
                queue_type,
            } => self.get_params(stream_id, queue_type),
            SetParams {
                stream_id,
                queue_type,
                params,
            } => self.set_params(stream_id, queue_type, params),
            QueryControl { query_ctrl_type } => self.query_control(query_ctrl_type),
            GetControl {
                stream_id,
                ctrl_type,
            } => self.get_control(stream_id, ctrl_type),
            SetControl { .. } => {
                error!("SET_CONTROL is not allowed for decoder");
                Err(VideoError::InvalidOperation)
            }
            StreamDrain { stream_id } => self.drain_stream(stream_id),
            QueueClear {
                stream_id,
                queue_type,
            } => self.clear_queue(stream_id, queue_type),
        };

        // Errors are converted into a synchronous error response for the
        // driver rather than propagated to the caller.
        let cmd_ret = match cmd_response {
            Ok(r) => r,
            Err(e) => {
                error!("returning error response: {}", &e);
                Sync(e.into())
            }
        };
        (cmd_ret, event_ret)
    }

    /// Reads one event from the stream's decoder session and translates it
    /// into zero or more virtio-video responses (async command completions
    /// and/or device events).
    ///
    /// Returns `None` on any failure to obtain the context, session, or
    /// event — errors are only logged (see the TODO below).
    fn process_event(
        &mut self,
        desc_map: &mut AsyncCmdDescMap,
        stream_id: u32,
    ) -> Option<Vec<VideoEvtResponseType>> {
        // TODO(b/161774071): Switch the return value from Option to VideoResult or another
        // result that would allow us to return an error to the caller.

        use crate::virtio::video::device::VideoEvtResponseType::*;

        let ctx = match self.contexts.get_mut(&stream_id) {
            Ok(ctx) => ctx,
            Err(e) => {
                error!("failed to get a context for session {}: {}", stream_id, e);
                return None;
            }
        };

        let session = match ctx.session.as_mut() {
            Some(s) => s,
            None => {
                error!("session not yet created for context {}", stream_id);
                return None;
            }
        };

        let event = match session.read_event() {
            Ok(event) => event,
            Err(e) => {
                error!("failed to read an event from session {}: {}", stream_id, e);
                return None;
            }
        };

        let event_responses = match event {
            // Backend wants a (new) set of output buffers: record the request
            // in the context and tell the driver the resolution changed.
            DecoderEvent::ProvidePictureBuffers {
                min_num_buffers,
                width,
                height,
                visible_rect,
            } => {
                ctx.handle_provide_picture_buffers(min_num_buffers, width, height, visible_rect);
                vec![Event(VideoEvt {
                    typ: EvtType::DecResChanged,
                    stream_id,
                })]
            }
            DecoderEvent::PictureReady {
                picture_buffer_id, // FrameBufferId
                bitstream_id,      // timestamp in second
                visible_rect,
            } => {
                if ctx.is_resetting {
                    // Pictures decoded before the reset completed are dropped.
                    vec![]
                } else {
                    // Buffer the picture; it is emitted now if an output
                    // resource is available, otherwise when one is queued.
                    ctx.pending_ready_pictures.push_back(PictureReadyEvent {
                        picture_buffer_id,
                        bitstream_id,
                        visible_rect,
                    });
                    ctx.output_pending_pictures()
                }
            }
            // The backend is done with an input buffer: complete the pending
            // input RESOURCE_QUEUE command for it.
            DecoderEvent::NotifyEndOfBitstreamBuffer(bitstream_id) => {
                let resource_id = ctx.handle_notify_end_of_bitstream_buffer(bitstream_id)?;
                let async_response = AsyncCmdResponse::from_response(
                    AsyncCmdTag::Queue {
                        stream_id,
                        queue_type: QueueType::Input,
                        resource_id,
                    },
                    CmdResponse::ResourceQueue {
                        timestamp: 0, // ignored for bitstream buffers.
                        flags: 0,     // no flag is raised, as it's returned successfully.
                        size: 0,      // this field is only for encoder
                    },
                );
                vec![AsyncCmd(async_response)]
            }
            DecoderEvent::FlushCompleted(flush_result) => {
                match flush_result {
                    Ok(()) => {
                        // Completing a drain requires returning one output
                        // buffer carrying the EOS flag, then the drain itself.
                        let eos_resource_id = match ctx.out_res.dequeue_eos_resource_id() {
                            Some(r) => r,
                            None => {
                                // TODO(b/168750131): Instead of trigger error, we should wait for
                                // the next output buffer enqueued, then dequeue the buffer with
                                // EOS flag.
                                error!(
                                    "No EOS resource available on successful flush response (stream id {})",
                                    stream_id);
                                return Some(vec![Event(VideoEvt {
                                    typ: EvtType::Error,
                                    stream_id,
                                })]);
                            }
                        };

                        let eos_tag = AsyncCmdTag::Queue {
                            stream_id,
                            queue_type: QueueType::Output,
                            resource_id: eos_resource_id,
                        };

                        let eos_response = CmdResponse::ResourceQueue {
                            timestamp: 0,
                            flags: protocol::VIRTIO_VIDEO_BUFFER_FLAG_EOS,
                            size: 0,
                        };
                        vec![
                            AsyncCmd(AsyncCmdResponse::from_response(eos_tag, eos_response)),
                            AsyncCmd(AsyncCmdResponse::from_response(
                                AsyncCmdTag::Drain { stream_id },
                                CmdResponse::NoData,
                            )),
                        ]
                    }
                    Err(error) => {
                        // TODO(b/151810591): If `resp` is `libvda::decode::Response::Canceled`,
                        // we should notify it to the driver in some way.
                        error!(
                            "failed to 'Flush' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(
                            AsyncCmdTag::Drain { stream_id },
                            error,
                        ))]
                    }
                }
            }
            DecoderEvent::ResetCompleted(reset_result) => {
                ctx.is_resetting = false;
                let tag = AsyncCmdTag::Clear {
                    stream_id,
                    queue_type: QueueType::Input,
                };
                match reset_result {
                    Ok(()) => {
                        // Cancel all still-pending input queue commands first,
                        // then complete the QUEUE_CLEAR itself.
                        let mut responses: Vec<_> = desc_map
                            .create_cancellation_responses(
                                &stream_id,
                                Some(QueueType::Input),
                                Some(tag),
                            )
                            .into_iter()
                            .map(AsyncCmd)
                            .collect();
                        responses.push(AsyncCmd(AsyncCmdResponse::from_response(
                            tag,
                            CmdResponse::NoData,
                        )));
                        responses
                    }
                    Err(error) => {
                        error!(
                            "failed to 'Reset' in VDA (stream id {}): {:?}",
                            stream_id, error
                        );
                        vec![AsyncCmd(AsyncCmdResponse::from_error(tag, error))]
                    }
                }
            }
            // Backend-reported failure: surface it to the driver as a device
            // error event.
            DecoderEvent::NotifyError(error) => {
                error!("an error is notified by VDA: {}", error);
                vec![Event(VideoEvt {
                    typ: EvtType::Error,
                    stream_id,
                })]
            }
        };

        Some(event_responses)
    }
}
1090 
1091 /// Create a new decoder instance using a Libvda decoder instance to perform
1092 /// the decoding.
1093 impl<'a> Decoder<&'a libvda::decode::VdaInstance> {
new(vda: &'a libvda::decode::VdaInstance) -> Self1094     pub fn new(vda: &'a libvda::decode::VdaInstance) -> Self {
1095         Decoder {
1096             decoder: vda,
1097             capability: Capability::new(vda.get_capabilities()),
1098             contexts: ContextMap::new(),
1099         }
1100     }
1101 }
1102