/**************************************************************************
 *
 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_video_codec.h"

#include "util/u_handle_table.h"
#include "util/u_video.h"
#include "util/u_memory.h"
#include "util/set.h"

#include "vl/vl_vlc.h"
#include "vl/vl_winsys.h"

#include "va_private.h"

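/* Move a surface to a new owning context. The surface is removed from its
 * previous context's surface set and added to the new one. If the old
 * context still holds a fence for the surface and the driver cannot manage
 * it through a postproc decoder, the fence is destroyed here so that
 * vaQuerySurfaceStatus/vaSyncSurface keep working and the fence is not
 * leaked.
 */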
void
vlVaSetSurfaceContext(vlVaDriver *drv, vlVaSurface *surf, vlVaContext *context)
{
   if (surf->ctx == context)
      return;

   if (surf->ctx) {
      assert(_mesa_set_search(surf->ctx->surfaces, surf));
      _mesa_set_remove_key(surf->ctx->surfaces, surf);

      /* Only drivers supporting PIPE_VIDEO_ENTRYPOINT_PROCESSING will create
       * a decoder for the postproc context and thus be able to wait on and
       * destroy the surface fence. On other drivers we need to destroy the
       * fence here, otherwise vaQuerySurfaceStatus/vaSyncSurface will fail
       * and we may also leak the fence.
       */
      if (surf->fence && !context->decoder &&
          context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
          surf->ctx->decoder && surf->ctx->decoder->destroy_fence &&
          !drv->pipe->screen->get_video_param(drv->pipe->screen,
                                              PIPE_VIDEO_PROFILE_UNKNOWN,
                                              PIPE_VIDEO_ENTRYPOINT_PROCESSING,
                                              PIPE_VIDEO_CAP_SUPPORTED)) {
         surf->ctx->decoder->destroy_fence(surf->ctx->decoder, surf->fence);
         surf->fence = NULL;
      }
   }

   surf->ctx = context;
   _mesa_set_add(surf->ctx->surfaces, surf);
}

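/* Like vlVaSetSurfaceContext, but for buffers: detach the buffer from its
 * previous context's buffer set and attach it to the new context.
 */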
static void
vlVaSetBufferContext(vlVaDriver *drv, vlVaBuffer *buf, vlVaContext *context)
{
   if (buf->ctx == context)
      return;

   if (buf->ctx) {
      assert(_mesa_set_search(buf->ctx->buffers, buf));
      _mesa_set_remove_key(buf->ctx->buffers, buf);
   }

   buf->ctx = context;
   _mesa_set_add(buf->ctx->buffers, buf);
}

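/* vaBeginPicture entry point: look up the context and render target, reset
 * per-picture state (MPEG-1/2 matrices, encoder metadata, slice bookkeeping)
 * and remember the target surface for the following vaRenderPicture calls.
 */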
VAStatus
vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaSurface *surf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
      context->desc.mpeg12.intra_matrix = NULL;
      context->desc.mpeg12.non_intra_matrix = NULL;
   }

   surf = handle_table_get(drv->htab, render_target);
   vlVaGetSurfaceBuffer(drv, surf);
   if (!surf || !surf->buffer) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   if (surf->coded_buf) {
      surf->coded_buf->coded_surf = NULL;
      surf->coded_buf = NULL;
   }

   /* Encode only reads from the surface and doesn't set surface fence. */
   if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      vlVaSetSurfaceContext(drv, surf, context);

   context->target_id = render_target;
   context->target = surf->buffer;

   if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->needs_begin_frame = true;

   if (!context->decoder) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_SUCCESS;
   }

   /* Metadata and SEIs are set on a per-picture basis and need to be
    * cleared before rendering the picture. */
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      switch (u_reduce_video_profile(context->templat.profile)) {
      case PIPE_VIDEO_FORMAT_AV1:
         context->desc.av1enc.metadata_flags.value = 0;
         context->desc.av1enc.roi.num = 0;
         context->desc.av1enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         context->desc.h265enc.roi.num = 0;
         context->desc.h265enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         context->desc.h264enc.roi.num = 0;
         context->desc.h264enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
         break;
      default:
         break;
      }
   }

   context->slice_data_offset = 0;
   context->have_slice_params = false;

   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}

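/* Translate a VASurfaceID into the pipe_video_buffer used as a reference
 * frame, or NULL if the surface handle is unknown.
 */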
void
vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
                      struct pipe_video_buffer **ref_frame)
{
   vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
   if (surf)
      *ref_frame = vlVaGetSurfaceBuffer(drv, surf);
   else
      *ref_frame = NULL;
}

/*
 * in->quality = 0: without any settings it uses the speed preset,
 *                  no pre-encoding and no vbaq. It is the fastest setting.
 * in->quality = 1: suggested setting, with the balanced preset,
 *                  pre-encoding and vbaq.
 * in->quality = others: a customized setting with the valid bit
 *                  (bit #0) set to "1", for example:
 *
 *                  0x3  (balanced preset, no pre-encoding, no vbaq)
 *                  0x13 (balanced preset, no pre-encoding, vbaq)
 *                  0x9  (speed preset, pre-encoding, no vbaq)
 *                  0x19 (speed preset, pre-encoding, vbaq)
 *
 * The quality value has to be treated as a combination of preset mode,
 * pre-encoding and vbaq settings. The resulting quality and speed may
 * vary depending on the chosen combination.
 */
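/* A worked example of the bit layout implied by the values above (a sketch,
 * assuming the vlVaQualityBits bitfields in va_private.h match these
 * examples): bit 0 is the valid bit, bits 1-2 select the preset
 * (0 = speed, 1 = balanced), bit 3 enables pre-encoding and bit 4 enables
 * vbaq. For instance 0x19 = 0b11001 decodes to valid | speed preset |
 * pre-encoding | vbaq, matching the last example listed.
 */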
void
vlVaHandleVAEncMiscParameterTypeQualityLevel(struct pipe_enc_quality_modes *p, vlVaQualityBits *in)
{
   if (!in->quality) {
      p->level = 0;
      p->preset_mode = PRESET_MODE_SPEED;
      p->pre_encode_mode = PREENCODING_MODE_DISABLE;
      p->vbaq_mode = VBAQ_DISABLE;

      return;
   }

   if (p->level != in->quality) {
      if (in->quality == 1) {
         p->preset_mode = PRESET_MODE_BALANCE;
         p->pre_encode_mode = PREENCODING_MODE_DEFAULT;
         p->vbaq_mode = VBAQ_AUTO;
      } else {
         p->preset_mode = in->preset_mode;
         p->pre_encode_mode = in->pre_encode_mode;
         p->vbaq_mode = in->vbaq_mode;
      }
   }
   p->level = in->quality;
}

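/* Dispatch a VAPictureParameterBufferType buffer to the codec-specific
 * handler and lazily create the hardware decoder once max_references is
 * known from the picture parameters.
 */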
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   enum pipe_video_format format =
      u_reduce_video_profile(context->templat.profile);

   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandlePictureParameterBufferVP9(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vaStatus = vlVaHandlePictureParameterBufferAV1(drv, context, buf);
      break;

   default:
      break;
   }

   /* Create the decoder once max_references is known. */
   if (!context->decoder) {
      if (!context->target)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      mtx_lock(&context->mutex);

      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->templat.level = u_get_h264_level(context->templat.width,
            context->templat.height, &context->templat.max_references);

      context->decoder = drv->pipe->create_video_codec(drv->pipe,
         &context->templat);

      mtx_unlock(&context->mutex);

      if (!context->decoder)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;

      context->needs_begin_frame = true;
   }

   if (format == PIPE_VIDEO_FORMAT_VP9) {
      context->decoder->width =
         context->desc.vp9.picture_parameter.frame_width;
      context->decoder->height =
         context->desc.vp9.picture_parameter.frame_height;
   }

   return vaStatus;
}

static void
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleIQMatrixBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleIQMatrixBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleIQMatrixBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleIQMatrixBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleIQMatrixBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

static void
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleSliceParameterBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandleSliceParameterBufferVC1(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleSliceParameterBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleSliceParameterBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleSliceParameterBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleSliceParameterBufferMJPEG(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandleSliceParameterBufferVP9(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandleSliceParameterBufferAV1(context, buf);
      break;

   default:
      break;
   }
}

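/* Scan up to the first 64 bytes of the buffer for a startcode of the given
 * bit length; returns 1 when found, 0 otherwise.
 */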
static unsigned int
bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
{
   struct vl_vlc vlc = {0};
   int i;

   /* search the first 64 bytes for a startcode */
   vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
   for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
      if (vl_vlc_peekbits(&vlc, bits) == code)
         return 1;
      vl_vlc_eatbits(&vlc, 8);
      vl_vlc_fillbits(&vlc);
   }

   return 0;
}

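/* Copy the DRM decryption key carried in a VAProtectedSliceDataBufferType
 * buffer into the base picture description and flag protected playback.
 */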
static void
handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   uint8_t* encrypted_data = (uint8_t*) buf->data;
   uint8_t* drm_key;

   unsigned int drm_key_size = buf->size;

   drm_key = REALLOC(context->desc.base.decrypt_key,
                     context->desc.base.key_size, drm_key_size);
   if (!drm_key)
      return;
   context->desc.base.decrypt_key = drm_key;
   memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
   context->desc.base.key_size = drm_key_size;
   context->desc.base.protected_playback = true;
}

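/* Queue a slice data buffer for decoding. For codecs that require it, a
 * missing startcode (or JPEG slice header) is prepended first, then the
 * application's buffer is appended to the context's bitstream buffer list,
 * which is submitted to the decoder in vlVaRenderPicture.
 */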
static VAStatus
handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
   static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_vc1_frame[] = { 0x00, 0x00, 0x01, 0x0d };
   static const uint8_t start_code_vc1_field[] = { 0x00, 0x00, 0x01, 0x0c };
   static const uint8_t start_code_vc1_slice[] = { 0x00, 0x00, 0x01, 0x0b };
   static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };

   if (!context->decoder)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (context->bs.allocated_size - context->bs.num_buffers < 3) {
      context->bs.buffers = REALLOC(context->bs.buffers,
                                    context->bs.allocated_size * sizeof(*context->bs.buffers),
                                    (context->bs.allocated_size + 3) * sizeof(*context->bs.buffers));
      context->bs.sizes = REALLOC(context->bs.sizes,
                                  context->bs.allocated_size * sizeof(*context->bs.sizes),
                                  (context->bs.allocated_size + 3) * sizeof(*context->bs.sizes));
      context->bs.allocated_size += 3;
   }

   format = u_reduce_video_profile(context->templat.profile);
   if (!context->desc.base.protected_playback) {
      switch (format) {
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h264;
         context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h264);
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h265;
         context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h265);
         break;
      case PIPE_VIDEO_FORMAT_VC1:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
            const uint8_t *start_code;
            if (context->slice_data_offset)
               start_code = start_code_vc1_slice;
            else if (context->desc.vc1.is_first_field)
               start_code = start_code_vc1_frame;
            else
               start_code = start_code_vc1_field;
            context->bs.buffers[context->bs.num_buffers] = (void *const)start_code;
            context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_vc1_frame);
         }
         break;
      case PIPE_VIDEO_FORMAT_MPEG4:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         vlVaDecoderFixMPEG4Startcode(context);
         context->bs.buffers[context->bs.num_buffers] = (void *)context->mpeg4.start_code;
         context->bs.sizes[context->bs.num_buffers++] = context->mpeg4.start_code_size;
         break;
      case PIPE_VIDEO_FORMAT_JPEG:
         if (bufHasStartcode(buf, 0xffd8ffdb, 32))
            break;

         vlVaGetJpegSliceHeader(context);
         context->bs.buffers[context->bs.num_buffers] = (void *)context->mjpeg.slice_header;
         context->bs.sizes[context->bs.num_buffers++] = context->mjpeg.slice_header_size;
         break;
      case PIPE_VIDEO_FORMAT_VP9:
         vlVaDecoderVP9BitstreamHeader(context, buf);
         break;
      case PIPE_VIDEO_FORMAT_AV1:
         break;
      default:
         break;
      }
   }

   context->bs.buffers[context->bs.num_buffers] = buf->data;
   context->bs.sizes[context->bs.num_buffers++] = buf->size;

   if (format == PIPE_VIDEO_FORMAT_JPEG) {
      context->bs.buffers[context->bs.num_buffers] = (void *const)&eoi_jpeg;
      context->bs.sizes[context->bs.num_buffers++] = sizeof(eoi_jpeg);
   }

   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   return VA_STATUS_SUCCESS;
}

static VAStatus
handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeRateControlAV1(context, misc);
      break;
#endif
   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateAV1(context, misc);
      break;
#endif
   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncSequenceParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeQualityLevel(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeQualityLevelAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeMaxFrameSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeHRD(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeHRDH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeHRDHEVC(context, misc);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncMiscParameterTypeHRDAV1(context, misc);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeMaxSliceSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   VAEncMiscParameterMaxSliceSize *max_slice_size_buffer = (VAEncMiscParameterMaxSliceSize *)misc->data;
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
   {
      context->desc.h264enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
      context->desc.h264enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
   } break;
   case PIPE_VIDEO_FORMAT_HEVC:
   {
      context->desc.h265enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
      context->desc.h265enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
   } break;
   default:
      break;
   }
   return status;
}

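/* Map VAEncMiscParameterTypeRIR (rolling intra refresh) onto the pipe
 * intra-refresh settings: rolling rows/columns select the refresh unit,
 * any other non-zero flag falls back to column refresh, and a zero
 * insertion offset requests that sequence-level headers accompany the
 * refresh.
 */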
static VAStatus
handleVAEncMiscParameterTypeRIR(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   struct pipe_enc_intra_refresh *p_intra_refresh = NULL;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      p_intra_refresh = &context->desc.h264enc.intra_refresh;
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      p_intra_refresh = &context->desc.h265enc.intra_refresh;
      break;
#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      p_intra_refresh = &context->desc.av1enc.intra_refresh;
      break;
#endif
   default:
      return status;
   };

   VAEncMiscParameterRIR *ir = (VAEncMiscParameterRIR *)misc->data;

   if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_ROW)
      p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_ROWS;
   else if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_COLUMN)
      p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
   else if (ir->rir_flags.value) /* any other value uses the default mode */
      p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
   else /* no mode specified, disable intra-refresh */
      p_intra_refresh->mode = INTRA_REFRESH_MODE_NONE;

   /* intra refresh should start with sequence-level headers */
   p_intra_refresh->need_sequence_header = 0;
   if (p_intra_refresh->mode) {
      p_intra_refresh->region_size = ir->intra_insert_size;
      p_intra_refresh->offset = ir->intra_insertion_location;
      if (p_intra_refresh->offset == 0)
         p_intra_refresh->need_sequence_header = 1;
   }

   return status;
}

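/* Copy the VA ROI regions into the encoder picture description, clamping
 * each qp delta to the [min_delta_qp, max_delta_qp] range supplied by the
 * application and marking unused region slots as invalid.
 */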
static VAStatus
handleVAEncMiscParameterTypeROI(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;
   struct pipe_enc_roi *proi = NULL;
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      proi = &context->desc.h264enc.roi;
      break;
   case PIPE_VIDEO_FORMAT_HEVC:
      proi = &context->desc.h265enc.roi;
      break;
#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      proi = &context->desc.av1enc.roi;
      break;
#endif
   default:
      break;
   };

   if (proi) {
      VAEncMiscParameterBufferROI *roi = (VAEncMiscParameterBufferROI *)misc->data;
      /* Priority-type ROIs are not supported, and at most
       * PIPE_ENC_ROI_REGION_NUM_MAX (32) regions are allowed. */
      if ((roi->num_roi > 0 && roi->roi_flags.bits.roi_value_is_qp_delta == 0)
            || roi->num_roi > PIPE_ENC_ROI_REGION_NUM_MAX)
         status = VA_STATUS_ERROR_FLAG_NOT_SUPPORTED;
      else {
         uint32_t i;
         VAEncROI *src = roi->roi;

         proi->num = roi->num_roi;
         for (i = 0; i < roi->num_roi; i++) {
            proi->region[i].valid = true;
            proi->region[i].x = src->roi_rectangle.x;
            proi->region[i].y = src->roi_rectangle.y;
            proi->region[i].width = src->roi_rectangle.width;
            proi->region[i].height = src->roi_rectangle.height;
            proi->region[i].qp_value = (int32_t)CLAMP(src->roi_value, roi->min_delta_qp, roi->max_delta_qp);
            src++;
         }

         for (; i < PIPE_ENC_ROI_REGION_NUM_MAX; i++)
            proi->region[i].valid = false;
      }
   }

   return status;
}

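/* Dispatch a VAEncMiscParameterBufferType buffer to the handler matching
 * its misc parameter type; unknown types are silently ignored.
 */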
static VAStatus
handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   VAEncMiscParameterBuffer *misc;
   misc = buf->data;

   switch (misc->type) {
   case VAEncMiscParameterTypeRateControl:
      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
      break;

   case VAEncMiscParameterTypeFrameRate:
      vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
      break;

   case VAEncMiscParameterTypeTemporalLayerStructure:
      vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
      break;

   case VAEncMiscParameterTypeQualityLevel:
      vaStatus = handleVAEncMiscParameterTypeQualityLevel(context, misc);
      break;

   case VAEncMiscParameterTypeMaxFrameSize:
      vaStatus = handleVAEncMiscParameterTypeMaxFrameSize(context, misc);
      break;

   case VAEncMiscParameterTypeHRD:
      vaStatus = handleVAEncMiscParameterTypeHRD(context, misc);
      break;

   case VAEncMiscParameterTypeRIR:
      vaStatus = handleVAEncMiscParameterTypeRIR(context, misc);
      break;

   case VAEncMiscParameterTypeMaxSliceSize:
      vaStatus = handleVAEncMiscParameterTypeMaxSliceSize(context, misc);
      break;

   case VAEncMiscParameterTypeROI:
      vaStatus = handleVAEncMiscParameterTypeROI(context, misc);
      break;

   default:
      break;
   }

   return vaStatus;
}

static VAStatus
handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncPictureParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncSliceParameterBufferTypeAV1(drv, context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAEncPackedHeaderParameterBuffer *param = buf->data;

   context->packed_header_emulation_bytes = param->has_emulation_bytes;
   context->packed_header_type = param->type;

   return VA_STATUS_SUCCESS;
}

static VAStatus
handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
      break;

#if VA_CHECK_VERSION(1, 16, 0)
   case PIPE_VIDEO_FORMAT_AV1:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeAV1(context, buf);
      break;
#endif

   default:
      break;
   }

   return status;
}

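/* Encode-only: back a VAStatsStatisticsBufferType buffer with a pipe
 * resource (created on first use) and point the target video buffer's
 * statistics_data at it.
 */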
static VAStatus
handleVAStatsStatisticsBufferType(VADriverContextP ctx, vlVaContext *context, vlVaBuffer *buf)
{
   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   vlVaDriver *drv;
   drv = VL_VA_DRIVER(ctx);

   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!buf->derived_surface.resource)
      buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
                                                         PIPE_USAGE_STREAM, buf->size);

   context->target->statistics_data = buf->derived_surface.resource;

   return VA_STATUS_SUCCESS;
}

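/* vaRenderPicture entry point: walk the submitted buffers, handling any
 * VAProtectedSliceDataBufferType first since it flips the protected state,
 * then dispatch each buffer by type. Collected slice data is finally handed
 * to the decoder via decode_bitstream for bitstream-entrypoint contexts.
 */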
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
   vlVaDriver *drv;
   vlVaContext *context;
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   unsigned i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (!context->target_id) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_OPERATION_FAILED;
   }

   /* Always process VAProtectedSliceDataBufferType first because it changes the state */
   for (i = 0; i < num_buffers; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      if (buf->type == VAProtectedSliceDataBufferType)
         handleVAProtectedSliceDataBufferType(context, buf);
   }

   for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      switch (buf->type) {
      case VAPictureParameterBufferType:
         vaStatus = handlePictureParameterBuffer(drv, context, buf);
         break;

      case VAIQMatrixBufferType:
         handleIQMatrixBuffer(context, buf);
         break;

      case VASliceParameterBufferType:
         handleSliceParameterBuffer(context, buf);
         context->have_slice_params = true;
         break;

      case VASliceDataBufferType:
         vaStatus = handleVASliceDataBufferType(context, buf);
         /* Workaround for apps sending single slice data buffer followed
          * by multiple slice parameter buffers. */
         if (context->have_slice_params)
            context->slice_data_offset += buf->size;
         break;

      case VAProcPipelineParameterBufferType:
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
         break;

      case VAEncSequenceParameterBufferType:
         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
         break;

      case VAEncMiscParameterBufferType:
         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
         break;

      case VAEncPictureParameterBufferType:
         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
         break;

      case VAEncSliceParameterBufferType:
         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
         break;

      case VAHuffmanTableBufferType:
         vlVaHandleHuffmanTableBufferType(context, buf);
         break;

      case VAEncPackedHeaderParameterBufferType:
         handleVAEncPackedHeaderParameterBufferType(context, buf);
         break;
      case VAEncPackedHeaderDataBufferType:
         handleVAEncPackedHeaderDataBufferType(context, buf);
         break;

      case VAStatsStatisticsBufferType:
         handleVAStatsStatisticsBufferType(ctx, context, buf);
         break;

      default:
         break;
      }
   }

   if (context->decoder &&
       context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM &&
       context->bs.num_buffers) {
      context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
         context->bs.num_buffers, (const void * const*)context->bs.buffers, context->bs.sizes);
      context->bs.num_buffers = 0;
   }

   mtx_unlock(&drv->mutex);

   return vaStatus;
}

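/* AV1 film grain: when apply_grain is set for a bitstream-entrypoint AV1
 * context, decoding must write the grain-applied output into a separate
 * surface. Returns the surface id to use and a pointer to the desc field
 * that should receive the film grain target buffer.
 */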
static bool vlVaQueryApplyFilmGrainAV1(vlVaContext *context,
                                       int *output_id,
                                       struct pipe_video_buffer ***out_target)
{
   struct pipe_av1_picture_desc *av1 = NULL;

   if (u_reduce_video_profile(context->templat.profile) != PIPE_VIDEO_FORMAT_AV1 ||
       context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
      return false;

   av1 = &context->desc.av1;
   if (!av1->picture_parameter.film_grain_info.film_grain_info_fields.apply_grain)
      return false;

   *output_id = av1->picture_parameter.current_frame_id;
   *out_target = &av1->film_grain_target;
   return true;
}

static void vlVaClearRawHeaders(struct util_dynarray *headers)
{
   util_dynarray_foreach(headers, struct pipe_enc_raw_header, header)
      FREE(header->buffer);
   util_dynarray_clear(headers);
}

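/* vaEndPicture entry point: validate the context and target surface, then
 * submit the accumulated picture. Encode contexts run begin_frame/
 * encode_bitstream against the coded buffer, decode and postproc contexts
 * only attach their fence, and all paths finish with end_frame plus an
 * optional flush when the driver requires it.
 */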
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback = NULL;
   struct pipe_screen *screen;
   bool apply_av1_fg = false;
   struct pipe_video_buffer **out_target;
   int output_id;
   enum pipe_format target_format;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (!context->target_id) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_OPERATION_FAILED;
   }

   output_id = context->target_id;
   context->target_id = 0;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_CONTEXT;
      }

      /* VPP */
      mtx_unlock(&drv->mutex);
      return VA_STATUS_SUCCESS;
   }

   if (context->needs_begin_frame) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_OPERATION_FAILED;
   }

   out_target = &context->target;
   apply_av1_fg = vlVaQueryApplyFilmGrainAV1(context, &output_id, &out_target);

   surf = handle_table_get(drv->htab, output_id);
   if (surf && !surf->buffer && context->desc.base.protected_playback)
      surf->templat.bind |= PIPE_BIND_PROTECTED;
   vlVaGetSurfaceBuffer(drv, surf);
   if (!surf || !surf->buffer) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   if (apply_av1_fg) {
      vlVaSetSurfaceContext(drv, surf, context);
      *out_target = surf->buffer;
   }

   context->mpeg4.frame_num++;

   screen = context->decoder->context->screen;

   if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   target_format = context->target->buffer_format;

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      context->desc.base.fence = &coded_buf->fence;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->desc.h264enc.frame_num_cnt++;

      if (surf->efc_surface) {
         assert(surf == drv->last_efc_surface);
         context->target = surf->efc_surface->buffer;
         context->desc.base.input_format = surf->efc_surface->buffer->buffer_format;
         context->desc.base.output_format = surf->buffer->buffer_format;
         surf->efc_surface = NULL;
         drv->last_efc_surface = NULL;
      } else {
         context->desc.base.input_format = surf->buffer->buffer_format;
         context->desc.base.output_format = surf->buffer->buffer_format;
      }
      context->desc.base.input_full_range = surf->full_range;
      target_format = context->desc.base.output_format;

      if (coded_buf->coded_surf)
         coded_buf->coded_surf->coded_buf = NULL;
      vlVaGetBufferFeedback(coded_buf);
      vlVaSetBufferContext(drv, coded_buf, context);

      int driver_metadata_support = drv->pipe->screen->get_video_param(drv->pipe->screen,
                                                                       context->decoder->profile,
                                                                       context->decoder->entrypoint,
                                                                       PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA);
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->desc.h264enc.requested_metadata = driver_metadata_support;
      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         context->desc.h265enc.requested_metadata = driver_metadata_support;
      else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
         context->desc.av1enc.requested_metadata = driver_metadata_support;

      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      coded_buf->feedback = feedback;
      coded_buf->coded_surf = surf;
      surf->coded_buf = coded_buf;
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
      context->desc.base.fence = &surf->fence;
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING) {
      context->desc.base.fence = &surf->fence;
   }

   if (screen->is_video_target_buffer_supported &&
       !screen->is_video_target_buffer_supported(screen,
                                                 target_format,
                                                 context->target,
                                                 context->decoder->profile,
                                                 context->decoder->entrypoint)) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_SURFACE;
   }

   /* when there are external handles, we can't set PIPE_FLUSH_ASYNC */
   if (context->desc.base.fence)
      context->desc.base.flush_flags = drv->has_external_handles ? 0 : PIPE_FLUSH_ASYNC;

   if (context->decoder->end_frame(context->decoder, context->target, &context->desc.base) != 0) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_OPERATION_FAILED;
   }

   if (drv->pipe->screen->get_video_param(drv->pipe->screen,
                                          context->decoder->profile,
                                          context->decoder->entrypoint,
                                          PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
      context->decoder->flush(context->decoder);

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      switch (u_reduce_video_profile(context->templat.profile)) {
      case PIPE_VIDEO_FORMAT_AV1:
         context->desc.av1enc.frame_num++;
         vlVaClearRawHeaders(&context->desc.av1enc.raw_headers);
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         context->desc.h265enc.frame_num++;
         vlVaClearRawHeaders(&context->desc.h265enc.raw_headers);
         break;
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (!context->desc.h264enc.not_referenced)
            context->desc.h264enc.frame_num++;
         vlVaClearRawHeaders(&context->desc.h264enc.raw_headers);
         break;
      default:
         break;
      }
   }

   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}

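/* Append a packed header to the raw header list. When emulation_bytes_start
 * is non-zero the payload after that offset gets start-code emulation
 * prevention applied: after two consecutive zero bytes, any byte <= 0x03 is
 * preceded by an inserted 0x03. For example the sequence 00 00 01 is stored
 * as 00 00 03 01, which is why the allocation reserves size * 3 / 2 bytes.
 */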
void
vlVaAddRawHeader(struct util_dynarray *headers, uint8_t type, uint32_t size,
                 uint8_t *buf, bool is_slice, uint32_t emulation_bytes_start)
{
   struct pipe_enc_raw_header header = {
      .type = type,
      .is_slice = is_slice,
   };
   if (emulation_bytes_start) {
      uint32_t pos = emulation_bytes_start, num_zeros = 0;
      header.buffer = MALLOC(size * 3 / 2);
      memcpy(header.buffer, buf, emulation_bytes_start);
      for (uint32_t i = emulation_bytes_start; i < size; i++) {
         uint8_t byte = buf[i];
         if (num_zeros >= 2 && byte <= 0x03) {
            header.buffer[pos++] = 0x03;
            num_zeros = 0;
         }
         header.buffer[pos++] = byte;
         num_zeros = byte == 0x00 ? num_zeros + 1 : 0;
      }
      header.size = pos;
   } else {
      header.size = size;
      header.buffer = MALLOC(header.size);
      memcpy(header.buffer, buf, size);
   }
   util_dynarray_append(headers, struct pipe_enc_raw_header, header);
}