// Copyright 2023 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use std::rc::Rc;

use anyhow::anyhow;
use anyhow::Context;
use libva::Display;

use crate::backend::vaapi::decoder::va_surface_id;
use crate::backend::vaapi::decoder::DecodedHandle as VADecodedHandle;
use crate::backend::vaapi::decoder::VaStreamInfo;
use crate::backend::vaapi::decoder::VaapiBackend;
use crate::backend::vaapi::decoder::VaapiPicture;
use crate::codec::av1::parser::BitDepth;
use crate::codec::av1::parser::FrameHeaderObu;
use crate::codec::av1::parser::Profile;
use crate::codec::av1::parser::StreamInfo;
use crate::codec::av1::parser::TileGroupObu;
use crate::codec::av1::parser::WarpModelType;
use crate::codec::av1::parser::MAX_SEGMENTS;
use crate::codec::av1::parser::MAX_TILE_COLS;
use crate::codec::av1::parser::MAX_TILE_ROWS;
use crate::codec::av1::parser::NUM_REF_FRAMES;
use crate::codec::av1::parser::SEG_LVL_MAX;
use crate::decoder::stateless::av1::Av1;
use crate::decoder::stateless::av1::StatelessAV1DecoderBackend;
use crate::decoder::stateless::NewPictureError;
use crate::decoder::stateless::NewPictureResult;
use crate::decoder::stateless::NewStatelessDecoderError;
use crate::decoder::stateless::StatelessBackendResult;
use crate::decoder::stateless::StatelessDecoder;
use crate::decoder::stateless::StatelessDecoderBackendPicture;
use crate::decoder::BlockingMode;
use crate::video_frame::VideoFrame;
use crate::Rect;
use crate::Resolution;

/// The number of surfaces to allocate for this codec.
const NUM_SURFACES: usize = 16;

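// Reports the VA-API stream parameters (profile, RT format, minimum surface
// count, coded size and visible rectangle) derived from the parsed AV1
// sequence header.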
impl VaStreamInfo for &StreamInfo {
    fn va_profile(&self) -> anyhow::Result<i32> {
        match self.seq_header.seq_profile {
            Profile::Profile0 => Ok(libva::VAProfile::VAProfileAV1Profile0),
            Profile::Profile1 => Ok(libva::VAProfile::VAProfileAV1Profile1),
            Profile::Profile2 => {
                Err(anyhow!("Profile {:?} is not supported by VAAPI", self.seq_header.seq_profile))
            }
        }
    }

    fn rt_format(&self) -> anyhow::Result<u32> {
        // See table 6.4.1.
        match self.seq_header.seq_profile {
            Profile::Profile0 => {
                if self.seq_header.bit_depth == BitDepth::Depth8 {
                    Ok(libva::VA_RT_FORMAT_YUV420)
                } else if self.seq_header.bit_depth == BitDepth::Depth10 {
                    Ok(libva::VA_RT_FORMAT_YUV420_10)
                } else {
                    Err(anyhow!(
                        "Unsupported bit depth {:?} for profile {:?}",
                        self.seq_header.bit_depth,
                        self.seq_header.seq_profile
                    ))
                }
            }
            Profile::Profile1 => {
                if self.seq_header.bit_depth == BitDepth::Depth8 {
                    Ok(libva::VA_RT_FORMAT_YUV444)
                } else if self.seq_header.bit_depth == BitDepth::Depth10 {
                    Ok(libva::VA_RT_FORMAT_YUV444_10)
                } else {
                    Err(anyhow!(
                        "Unsupported bit depth {:?} for profile {:?}",
                        self.seq_header.bit_depth,
                        self.seq_header.seq_profile
                    ))
                }
            }
            Profile::Profile2 => {
                Err(anyhow!("Profile {:?} is not supported by VAAPI", self.seq_header.seq_profile))
            }
        }
    }

    fn min_num_surfaces(&self) -> usize {
        NUM_SURFACES
    }

    fn coded_size(&self) -> Resolution {
        Resolution::from((
            self.seq_header.max_frame_width_minus_1 as u32 + 1,
            self.seq_header.max_frame_height_minus_1 as u32 + 1,
        ))
    }

    fn visible_rect(&self) -> Rect {
        Rect::from(((0, 0), (self.render_width, self.render_height)))
    }
}

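// Builds the libva film grain parameter structure from the parsed frame
// header. The `*_plus_128` AR coefficients are rebased to signed 8-bit values
// here.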
impl From<&FrameHeaderObu> for libva::AV1FilmGrain {
    fn from(hdr: &FrameHeaderObu) -> Self {
        let fg = &hdr.film_grain_params;

        if fg.apply_grain {
            log::warn!("Film grain is not officially supported yet.")
        }

        let film_grain_fields = libva::AV1FilmGrainFields::new(
            u32::from(fg.apply_grain),
            u32::from(fg.chroma_scaling_from_luma),
            u32::from(fg.grain_scaling_minus_8),
            fg.ar_coeff_lag,
            fg.ar_coeff_shift_minus_6 as u32,
            fg.grain_scale_shift as u32,
            u32::from(fg.overlap_flag),
            u32::from(fg.clip_to_restricted_range),
        );

        const NUM_POINT_Y: usize = 14;
        let fg_point_y_value = {
            let mut fg_point_y_value = [0u8; NUM_POINT_Y];
            fg_point_y_value.copy_from_slice(&fg.point_y_value[0..NUM_POINT_Y]);
            fg_point_y_value
        };
        let fg_point_y_scaling = {
            let mut fg_point_y_scaling = [0u8; NUM_POINT_Y];
            fg_point_y_scaling.copy_from_slice(&fg.point_y_scaling[0..NUM_POINT_Y]);
            fg_point_y_scaling
        };

        const NUM_POINT_CB: usize = 10;
        let fg_point_cb_value = {
            let mut fg_point_cb_value = [0u8; NUM_POINT_CB];
            fg_point_cb_value.copy_from_slice(&fg.point_cb_value[0..NUM_POINT_CB]);
            fg_point_cb_value
        };
        let fg_point_cb_scaling = {
            let mut fg_point_cb_scaling = [0u8; NUM_POINT_CB];
            fg_point_cb_scaling.copy_from_slice(&fg.point_cb_scaling[0..NUM_POINT_CB]);
            fg_point_cb_scaling
        };

        const NUM_POINT_CR: usize = 10;
        let fg_point_cr_value = {
            let mut fg_point_cr_value = [0u8; NUM_POINT_CR];
            fg_point_cr_value.copy_from_slice(&fg.point_cr_value[0..NUM_POINT_CR]);
            fg_point_cr_value
        };
        let fg_point_cr_scaling = {
            let mut fg_point_cr_scaling = [0u8; NUM_POINT_CR];
            fg_point_cr_scaling.copy_from_slice(&fg.point_cr_scaling[0..NUM_POINT_CR]);
            fg_point_cr_scaling
        };

        let fg_ar_coeffs_y = {
            let mut fg_ar_coeffs_y = [0i8; 24];
            fg_ar_coeffs_y
                .iter_mut()
                .zip(fg.ar_coeffs_y_plus_128.iter().copied())
                .for_each(|(dest, src)| *dest = ((src as i16) - 128) as i8);
            fg_ar_coeffs_y
        };
        let fg_ar_coeffs_cb = {
            let mut fg_ar_coeffs_cb = [0i8; 25];
            fg_ar_coeffs_cb
                .iter_mut()
                .zip(fg.ar_coeffs_cb_plus_128.iter().copied())
                .for_each(|(dest, src)| *dest = ((src as i16) - 128) as i8);
            fg_ar_coeffs_cb
        };
        let fg_ar_coeffs_cr = {
            let mut fg_ar_coeffs_cr = [0i8; 25];
            fg_ar_coeffs_cr
                .iter_mut()
                .zip(fg.ar_coeffs_cr_plus_128.iter().copied())
                .for_each(|(dest, src)| *dest = ((src as i16) - 128) as i8);
            fg_ar_coeffs_cr
        };

        libva::AV1FilmGrain::new(
            &film_grain_fields,
            fg.grain_seed,
            fg.num_y_points,
            fg_point_y_value,
            fg_point_y_scaling,
            fg.num_cb_points,
            fg_point_cb_value,
            fg_point_cb_scaling,
            fg.num_cr_points,
            fg_point_cr_value,
            fg_point_cr_scaling,
            fg_ar_coeffs_y,
            fg_ar_coeffs_cb,
            fg_ar_coeffs_cr,
            fg.cb_mult,
            fg.cb_luma_mult,
            fg.cb_offset,
            fg.cr_mult,
            fg.cr_luma_mult,
            fg.cr_offset,
        )
    }
}

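/// Builds the warped motion parameters for the seven non-intra reference
/// frames from the frame header's global motion parameters.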
fn build_wm_info(hdr: &FrameHeaderObu) -> [libva::AV1WarpedMotionParams; 7] {
    let mut wm = vec![];
    let gm = &hdr.global_motion_params;
    for i in 1..=7 {
        let wm_type = match gm.gm_type[i] {
            /* TODO: these were not exported in cros-libva */
            WarpModelType::Identity => 0,
            WarpModelType::Translation => 1,
            WarpModelType::RotZoom => 2,
            WarpModelType::Affine => 3,
        };

        let params = {
            let mut params = [0; 8];
            params[0..6].copy_from_slice(&gm.gm_params[i][0..6]);
            params
        };

        wm.push(libva::AV1WarpedMotionParams::new(wm_type, params, u8::from(!gm.warp_valid[i])));
    }

    match wm.try_into() {
        Ok(wm) => wm,
        Err(_) => unreachable!("The Vec should have the right size"),
    }
}

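/// Builds the VA-API picture parameter buffer for the current frame from the
/// sequence header, the frame header, the target surface and the reference
/// frame map.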
fn build_pic_param<V: VideoFrame>(
    hdr: &FrameHeaderObu,
    stream_info: &StreamInfo,
    current_frame: libva::VASurfaceID,
    reference_frames: &[Option<VADecodedHandle<V>>; NUM_REF_FRAMES],
) -> anyhow::Result<libva::BufferType> {
    let seq = stream_info.seq_header.clone();

    let seq_info_fields = libva::AV1SeqFields::new(
        u32::from(seq.still_picture),
        u32::from(seq.use_128x128_superblock),
        u32::from(seq.enable_filter_intra),
        u32::from(seq.enable_intra_edge_filter),
        u32::from(seq.enable_interintra_compound),
        u32::from(seq.enable_masked_compound),
        u32::from(seq.enable_dual_filter),
        u32::from(seq.enable_order_hint),
        u32::from(seq.enable_jnt_comp),
        u32::from(seq.enable_cdef),
        u32::from(seq.color_config.mono_chrome),
        u32::from(seq.color_config.color_range),
        u32::from(seq.color_config.subsampling_x),
        u32::from(seq.color_config.subsampling_y),
        seq.color_config.chroma_sample_position as u32,
        u32::from(seq.film_grain_params_present),
    );

    let seg = &hdr.segmentation_params;
    let seg_info_fields = libva::AV1SegmentInfoFields::new(
        u32::from(seg.segmentation_enabled),
        u32::from(seg.segmentation_update_map),
        u32::from(seg.segmentation_temporal_update),
        u32::from(seg.segmentation_update_data),
    );

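    // Collapse the per-segment `feature_enabled` flags into the per-segment
    // bitmask layout expected by libva, one bit per SEG_LVL feature.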
    let seg_feature_mask = {
        let mut seg_feature_mask = [0u8; MAX_SEGMENTS];
        #[allow(clippy::needless_range_loop)]
        for i in 0..MAX_SEGMENTS {
            let mut mask = 0;
            for j in 0..SEG_LVL_MAX {
                if seg.feature_enabled[i][j] {
                    mask |= 1 << j;
                }
            }
            seg_feature_mask[i] = mask;
        }
        seg_feature_mask
    };

    let seg_info =
        libva::AV1Segmentation::new(&seg_info_fields, seg.feature_data, seg_feature_mask);

    let pic_info_fields = libva::AV1PicInfoFields::new(
        hdr.frame_type as u32,
        u32::from(hdr.show_frame),
        u32::from(hdr.showable_frame),
        u32::from(hdr.error_resilient_mode),
        u32::from(hdr.disable_cdf_update),
        hdr.allow_screen_content_tools,
        hdr.force_integer_mv,
        u32::from(hdr.allow_intrabc),
        u32::from(hdr.use_superres),
        u32::from(hdr.allow_high_precision_mv),
        u32::from(hdr.is_motion_mode_switchable),
        u32::from(hdr.use_ref_frame_mvs),
        u32::from(hdr.disable_frame_end_update_cdf),
        u32::from(hdr.tile_info.uniform_tile_spacing_flag),
        u32::from(hdr.allow_warped_motion),
        0, /* large_scale_tile */
    );

    let bit_depth_idx = match seq.bit_depth {
        BitDepth::Depth8 => 0,
        BitDepth::Depth10 => 1,
        BitDepth::Depth12 => 2,
    };

    let ref_frame_map: [libva::VASurfaceID; NUM_REF_FRAMES] =
        reference_frames.iter().map(va_surface_id).collect::<Vec<_>>().try_into().unwrap();

    let width_in_sbs_minus_1 = {
        let mut width_in_sbs_minus_1 = [0; MAX_TILE_COLS - 1];
        #[allow(clippy::needless_range_loop)]
        for i in 0..width_in_sbs_minus_1.len() {
            width_in_sbs_minus_1[i] = u16::try_from(hdr.tile_info.width_in_sbs_minus_1[i])
                .context("Invalid width_in_sbs_minus_1")?;
        }
        width_in_sbs_minus_1
    };

    let height_in_sbs_minus_1 = {
        let mut height_in_sbs_minus_1 = [0; MAX_TILE_ROWS - 1];
        #[allow(clippy::needless_range_loop)]
        for i in 0..height_in_sbs_minus_1.len() {
            height_in_sbs_minus_1[i] = u16::try_from(hdr.tile_info.height_in_sbs_minus_1[i])
                .context("Invalid height_in_sbs_minus_1")?;
        }
        height_in_sbs_minus_1
    };

    let lf = &hdr.loop_filter_params;
    let filter_level = [lf.loop_filter_level[0], lf.loop_filter_level[1]];

    let lf_fields = libva::AV1LoopFilterFields::new(
        lf.loop_filter_sharpness,
        u8::from(lf.loop_filter_delta_enabled),
        u8::from(lf.loop_filter_delta_update),
    );

    let quant = &hdr.quantization_params;
    let qmatrix_fields = libva::AV1QMatrixFields::new(
        u16::from(quant.using_qmatrix),
        u16::try_from(quant.qm_y).context("Invalid qm_y")?,
        u16::try_from(quant.qm_u).context("Invalid qm_u")?,
        u16::try_from(quant.qm_v).context("Invalid qm_v")?,
    );

    let mode_control_fields = libva::AV1ModeControlFields::new(
        u32::from(quant.delta_q_present),
        quant.delta_q_res,
        u32::from(lf.delta_lf_present),
        lf.delta_lf_res as u32,
        lf.delta_lf_multi as u32,
        hdr.tx_mode as u32,
        u32::from(hdr.reference_select),
        u32::from(hdr.reduced_tx_set),
        u32::from(hdr.skip_mode_present),
    );

    let cdef = &hdr.cdef_params;
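    // Pack each CDEF strength pair as (primary << 2) | secondary, clamping a
    // secondary strength of 4 down to 3 so it fits the 2-bit field.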
    let (cdef_y_strengths, cdef_uv_strengths) = {
        let num_cdef_strengths = 1 << cdef.cdef_bits;
        let mut cdef_y_strengths = [0; 8];
        let mut cdef_uv_strengths = [0; 8];

        #[allow(clippy::needless_range_loop)]
        for i in 0..num_cdef_strengths {
            let mut sec_strength = cdef.cdef_y_sec_strength[i];
            if sec_strength == 4 {
                sec_strength -= 1;
            }
            cdef_y_strengths[i] =
                u8::try_from(((cdef.cdef_y_pri_strength[i] & 0xf) << 2) | (sec_strength & 0x3))
                    .context("Failed to merge primary and secondary strengths")?;
        }

        #[allow(clippy::needless_range_loop)]
        for i in 0..num_cdef_strengths {
            let mut sec_strength = cdef.cdef_uv_sec_strength[i];
            if sec_strength == 4 {
                sec_strength -= 1;
            }
            cdef_uv_strengths[i] =
                u8::try_from(((cdef.cdef_uv_pri_strength[i] & 0xf) << 2) | (sec_strength & 0x3))
                    .context("Failed to merge primary and secondary strengths")?;
        }

        (cdef_y_strengths, cdef_uv_strengths)
    };

    let lr = &hdr.loop_restoration_params;
    let loop_restoration_fields = libva::AV1LoopRestorationFields::new(
        lr.frame_restoration_type[0] as u16,
        lr.frame_restoration_type[1] as u16,
        lr.frame_restoration_type[2] as u16,
        u16::from(lr.lr_unit_shift),
        u16::from(lr.lr_uv_shift),
    );

    let wm = build_wm_info(hdr);

    let pic_param = libva::PictureParameterBufferAV1::new(
        u8::try_from(seq.seq_profile as u32).context("Invalid profile")?,
        u8::try_from(seq.order_hint_bits_minus_1).context("Invalid order hint bits")?,
        bit_depth_idx,
        u8::try_from(seq.color_config.matrix_coefficients as u32)
            .context("Invalid matrix_coefficients")?,
        &seq_info_fields,
        current_frame,
        libva::VA_INVALID_SURFACE, /* film grain is unsupported for now */
        vec![],                    /* anchor_frames_list */
        u16::try_from(hdr.upscaled_width - 1).context("Invalid frame width")?,
        u16::try_from(hdr.frame_height - 1).context("Invalid frame height")?,
        0, /* output_frame_width_in_tiles_minus_1 */
        0, /* output_frame_height_in_tiles_minus_1 */
        ref_frame_map,
        hdr.ref_frame_idx,
        u8::try_from(hdr.primary_ref_frame).context("Invalid primary_ref_frame")?,
        u8::try_from(hdr.order_hint).context("Invalid order_hint")?,
        &seg_info,
        &libva::AV1FilmGrain::from(hdr),
        u8::try_from(hdr.tile_info.tile_cols).context("Invalid tile_cols")?,
        u8::try_from(hdr.tile_info.tile_rows).context("Invalid tile_rows")?,
        width_in_sbs_minus_1,
        height_in_sbs_minus_1,
        0, /* large-scale tile not supported */
        u16::try_from(hdr.tile_info.context_update_tile_id)
            .context("Invalid context_update_tile_id")?,
        &pic_info_fields,
        u8::try_from(hdr.superres_denom).context("Invalid superres_denom")?,
        u8::try_from(hdr.interpolation_filter as u32).context("Invalid interpolation_filter")?,
        filter_level,
        lf.loop_filter_level[2],
        lf.loop_filter_level[3],
        &lf_fields,
        lf.loop_filter_ref_deltas,
        lf.loop_filter_mode_deltas,
        u8::try_from(quant.base_q_idx).context("Invalid base_q_idx")?,
        i8::try_from(quant.delta_q_y_dc).context("Invalid delta_q_y_dc")?,
        i8::try_from(quant.delta_q_u_dc).context("Invalid delta_q_u_dc")?,
        i8::try_from(quant.delta_q_u_ac).context("Invalid delta_q_u_ac")?,
        i8::try_from(quant.delta_q_v_dc).context("Invalid delta_q_v_dc")?,
        i8::try_from(quant.delta_q_v_ac).context("Invalid delta_q_v_ac")?,
        &qmatrix_fields,
        &mode_control_fields,
        u8::try_from(hdr.cdef_params.cdef_damping - 3).context("Invalid cdef_damping")?,
        u8::try_from(hdr.cdef_params.cdef_bits).context("Invalid cdef_bits")?,
        cdef_y_strengths,
        cdef_uv_strengths,
        &loop_restoration_fields,
        &wm,
    );

    Ok(libva::BufferType::PictureParameter(libva::PictureParameter::AV1(pic_param)))
}

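/// Builds a single slice parameter buffer containing one entry per tile in
/// the tile group.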
fn build_slice_params_for_tg(tg: &TileGroupObu) -> anyhow::Result<libva::BufferType> {
    let mut slice_params = libva::SliceParameterBufferAV1::new();

    for tile in &tg.tiles {
        /* all tiles must be submitted in the same slice parameter array */
        slice_params.add_slice_parameter(
            tile.tile_size,
            tile.tile_offset,
            0,
            u16::try_from(tile.tile_row).context("Invalid tile_row")?,
            u16::try_from(tile.tile_col).context("Invalid tile_col")?,
            u16::try_from(tg.tg_start).context("Invalid tg_start")?,
            u16::try_from(tg.tg_end).context("Invalid tg_end")?,
            0,
            0,
        );
    }

    Ok(libva::BufferType::SliceParameter(libva::SliceParameter::AV1(slice_params)))
}

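/// Wraps the raw OBU bytes of the tile group into a slice data buffer.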
fn build_slice_data_for_tg(tg: TileGroupObu) -> libva::BufferType {
    let TileGroupObu { obu, .. } = tg;
    libva::BufferType::SliceData(Vec::from(obu.as_ref()))
}

impl<V: VideoFrame> StatelessDecoderBackendPicture<Av1> for VaapiBackend<V> {
    type Picture = VaapiPicture<V>;
}

impl<V: VideoFrame> StatelessAV1DecoderBackend for VaapiBackend<V> {
    fn change_stream_info(&mut self, stream_info: &StreamInfo) -> StatelessBackendResult<()> {
        self.new_sequence(stream_info)
    }

    fn new_picture(
        &mut self,
        _hdr: &FrameHeaderObu,
        timestamp: u64,
        alloc_cb: &mut dyn FnMut() -> Option<V>,
    ) -> NewPictureResult<Self::Picture> {
        Ok(VaapiPicture::new(
            timestamp,
            Rc::clone(&self.context),
            alloc_cb().ok_or(NewPictureError::OutOfOutputBuffers)?,
        ))
    }

    fn begin_picture(
        &mut self,
        picture: &mut Self::Picture,
        stream_info: &StreamInfo,
        hdr: &FrameHeaderObu,
        reference_frames: &[Option<Self::Handle>; NUM_REF_FRAMES],
    ) -> StatelessBackendResult<()> {
        let pic_param = build_pic_param(hdr, stream_info, picture.surface().id(), reference_frames)
            .context("Failed to build picture parameter")?;
        let pic_param = self
            .context
            .create_buffer(pic_param)
            .context("Failed to create picture parameter buffer")?;
        picture.add_buffer(pic_param);

        Ok(())
    }

    fn decode_tile_group(
        &mut self,
        picture: &mut Self::Picture,
        tile_group: TileGroupObu,
    ) -> crate::decoder::stateless::StatelessBackendResult<()> {
        let slice_params = build_slice_params_for_tg(&tile_group)?;
        let slice_data = build_slice_data_for_tg(tile_group);

        let context = &self.context;

        let buffer = context
            .create_buffer(slice_params)
            .context("Failed to create slice parameter buffer")?;

        picture.add_buffer(buffer);

        let buffer =
            context.create_buffer(slice_data).context("Failed to create slice data buffer")?;

        picture.add_buffer(buffer);

        Ok(())
    }

    fn submit_picture(&mut self, picture: Self::Picture) -> StatelessBackendResult<Self::Handle> {
        self.process_picture::<Av1>(picture)
    }
}

impl<V: VideoFrame> StatelessDecoder<Av1, VaapiBackend<V>> {
    /// Creates a new instance of the decoder using the VAAPI backend.
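    ///
    /// A minimal usage sketch, assuming a VA-API capable device is available.
    /// It mirrors the call made in the tests below and is not compiled as a
    /// doctest:
    ///
    /// ```ignore
    /// let display = libva::Display::open().unwrap();
    /// let decoder =
    ///     StatelessDecoder::<Av1, _>::new_vaapi::<()>(display, BlockingMode::Blocking).unwrap();
    /// ```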
    pub fn new_vaapi(
        display: Rc<Display>,
        blocking_mode: BlockingMode,
    ) -> Result<Self, NewStatelessDecoderError> {
        Self::new(VaapiBackend::new(display, true), blocking_mode)
    }
}

#[cfg(test)]
mod tests {
    use libva::Display;

    use crate::bitstream_utils::IvfIterator;
    use crate::decoder::stateless::av1::Av1;
    use crate::decoder::stateless::tests::test_decode_stream;
    use crate::decoder::stateless::tests::TestStream;
    use crate::decoder::stateless::StatelessDecoder;
    use crate::decoder::BlockingMode;
    use crate::utils::simple_playback_loop;
    use crate::utils::simple_playback_loop_owned_frames;
    use crate::DecodedFormat;

    /// Run `test` using the VAAPI decoder with the given output format and blocking mode.
    fn test_decoder_vaapi(
        test: &TestStream,
        output_format: DecodedFormat,
        blocking_mode: BlockingMode,
    ) {
        let display = Display::open().unwrap();
        let decoder = StatelessDecoder::<Av1, _>::new_vaapi::<()>(display, blocking_mode).unwrap();

        test_decode_stream(
            |d, s, f| {
                simple_playback_loop(
                    d,
                    IvfIterator::new(s),
                    f,
                    &mut simple_playback_loop_owned_frames,
                    output_format,
                    blocking_mode,
                )
            },
            decoder,
            test,
            true,
            false,
        );
    }

    #[test]
    // Ignore this test by default as it requires libva-compatible hardware.
    #[ignore]
    fn test_25fps_av1_blocking() {
        use crate::decoder::stateless::av1::tests::DECODE_TEST_25FPS;
        test_decoder_vaapi(&DECODE_TEST_25FPS, DecodedFormat::NV12, BlockingMode::Blocking);
    }

    #[test]
    // Ignore this test by default as it requires libva-compatible hardware.
    #[ignore]
    fn test_25fps_av1_non_blocking() {
        use crate::decoder::stateless::av1::tests::DECODE_TEST_25FPS;
        test_decoder_vaapi(&DECODE_TEST_25FPS, DecodedFormat::NV12, BlockingMode::NonBlocking);
    }
}