1 /**************************************************************************
2  *
3  * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
4  * Copyright 2014 Advanced Micro Devices, Inc.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 
29 #include "pipe/p_video_codec.h"
30 
31 #include "util/u_handle_table.h"
32 #include "util/u_video.h"
33 #include "util/u_memory.h"
34 #include "util/set.h"
35 
36 #include "util/vl_vlc.h"
37 #include "vl/vl_winsys.h"
38 
39 #include "va_private.h"
40 
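/* Reassign a surface to a new context: drop it from the previous context's
 * surface set (destroying a stale fence when the old decoder has to clean it
 * up itself, see the comment below) and register it with the new one.
 */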
41 void
42 vlVaSetSurfaceContext(vlVaDriver *drv, vlVaSurface *surf, vlVaContext *context)
43 {
44    if (surf->ctx == context)
45       return;
46 
47    if (surf->ctx) {
48       assert(_mesa_set_search(surf->ctx->surfaces, surf));
49       _mesa_set_remove_key(surf->ctx->surfaces, surf);
50 
51       /* Only drivers supporting PIPE_VIDEO_ENTRYPOINT_PROCESSING will create a
52        * decoder for the postproc context and thus be able to wait on and destroy
53        * the surface fence. On other drivers we need to destroy the fence here,
54        * otherwise vaQuerySurfaceStatus/vaSyncSurface will fail and we'll also
55        * potentially leak the fence.
56        */
57       if (surf->fence && !context->decoder &&
58           context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
59           surf->ctx->decoder && surf->ctx->decoder->destroy_fence &&
60           !drv->pipe->screen->get_video_param(drv->pipe->screen,
61                                               PIPE_VIDEO_PROFILE_UNKNOWN,
62                                               PIPE_VIDEO_ENTRYPOINT_PROCESSING,
63                                               PIPE_VIDEO_CAP_SUPPORTED)) {
64          surf->ctx->decoder->destroy_fence(surf->ctx->decoder, surf->fence);
65          surf->fence = NULL;
66       }
67    }
68 
69    surf->ctx = context;
70    _mesa_set_add(surf->ctx->surfaces, surf);
71 }
72 
73 static void
74 vlVaSetBufferContext(vlVaDriver *drv, vlVaBuffer *buf, vlVaContext *context)
75 {
76    if (buf->ctx == context)
77       return;
78 
79    if (buf->ctx) {
80       assert(_mesa_set_search(buf->ctx->buffers, buf));
81       _mesa_set_remove_key(buf->ctx->buffers, buf);
82    }
83 
84    buf->ctx = context;
85    _mesa_set_add(buf->ctx->buffers, buf);
86 }
87 
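/* vaBeginPicture(): look up the context and render target surface, make the
 * surface the current target and reset per-picture state (encoder metadata,
 * ROI, intra refresh, slice bookkeeping) before buffers are rendered into
 * the picture.
 */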
88 VAStatus
89 vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
90 {
91    vlVaDriver *drv;
92    vlVaContext *context;
93    vlVaSurface *surf;
94 
95    if (!ctx)
96       return VA_STATUS_ERROR_INVALID_CONTEXT;
97 
98    drv = VL_VA_DRIVER(ctx);
99    if (!drv)
100       return VA_STATUS_ERROR_INVALID_CONTEXT;
101 
102    mtx_lock(&drv->mutex);
103    context = handle_table_get(drv->htab, context_id);
104    if (!context) {
105       mtx_unlock(&drv->mutex);
106       return VA_STATUS_ERROR_INVALID_CONTEXT;
107    }
108 
109    if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
110       context->desc.mpeg12.intra_matrix = NULL;
111       context->desc.mpeg12.non_intra_matrix = NULL;
112    }
113 
114    surf = handle_table_get(drv->htab, render_target);
115    vlVaGetSurfaceBuffer(drv, surf);
116    if (!surf || !surf->buffer) {
117       mtx_unlock(&drv->mutex);
118       return VA_STATUS_ERROR_INVALID_SURFACE;
119    }
120 
121    if (surf->coded_buf) {
122       surf->coded_buf->coded_surf = NULL;
123       surf->coded_buf = NULL;
124    }
125 
126    /* Encode only reads from the surface and doesn't set the surface fence. */
127    if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
128       vlVaSetSurfaceContext(drv, surf, context);
129 
130    context->target_id = render_target;
131    context->target = surf->buffer;
132 
133    if (context->templat.entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
134       context->needs_begin_frame = true;
135 
136    if (!context->decoder) {
137       mtx_unlock(&drv->mutex);
138       return VA_STATUS_SUCCESS;
139    }
140 
141    /* Metadata and SEIs are specified per picture, so they need to be
142     * cleared before rendering the picture. */
143    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
144       switch (u_reduce_video_profile(context->templat.profile)) {
145          case PIPE_VIDEO_FORMAT_AV1:
146             context->desc.av1enc.metadata_flags.value = 0;
147             context->desc.av1enc.roi.num = 0;
148             context->desc.av1enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
149             break;
150          case PIPE_VIDEO_FORMAT_HEVC:
151             context->desc.h265enc.roi.num = 0;
152             context->desc.h265enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
153             break;
154          case PIPE_VIDEO_FORMAT_MPEG4_AVC:
155             context->desc.h264enc.roi.num = 0;
156             context->desc.h264enc.intra_refresh.mode = INTRA_REFRESH_MODE_NONE;
157             break;
158          default:
159             break;
160       }
161    }
162 
163    context->slice_data_offset = 0;
164    context->have_slice_params = false;
165 
166    mtx_unlock(&drv->mutex);
167    return VA_STATUS_SUCCESS;
168 }
169 
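/* Resolve a VASurfaceID to the pipe_video_buffer used as a reference frame;
 * *ref_frame is set to NULL when the surface id is unknown.
 */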
170 void
171 vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
172                       struct pipe_video_buffer **ref_frame)
173 {
174    vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
175    if (surf)
176       *ref_frame = vlVaGetSurfaceBuffer(drv, surf);
177    else
178       *ref_frame = NULL;
179 }
180 
181 /*
182  * in->quality = 0; no settings given, so the speed preset is used,
183  *                  with no pre-encoding and no vbaq. It is the fastest setting.
184  * in->quality = 1; suggested setting, with balanced preset,
185  *                  pre-encoding and vbaq
186  * in->quality = others; a customized setting
187  *                  with the valid bit (bit #0) set to "1",
188  *                  for example:
189  *
190  *                  0x3  (balanced preset, no pre-encoding, no vbaq)
191  *                  0x13 (balanced preset, no pre-encoding, vbaq)
192  *                  0x9  (speed preset, pre-encoding, no vbaq)
193  *                  0x19 (speed preset, pre-encoding, vbaq)
194  *
195  *                  The quality value has to be treated as a combination
196  *                  of preset mode, pre-encoding and vbaq settings.
197  *                  The resulting quality and speed can vary according to
198  *                  the chosen settings.
199  */
200 void
201 vlVaHandleVAEncMiscParameterTypeQualityLevel(struct pipe_enc_quality_modes *p, vlVaQualityBits *in)
202 {
203    if (!in->quality) {
204       p->level = 0;
205       p->preset_mode = PRESET_MODE_SPEED;
206       p->pre_encode_mode = PREENCODING_MODE_DISABLE;
207       p->vbaq_mode = VBAQ_DISABLE;
208 
209       return;
210    }
211 
212    if (p->level != in->quality) {
213       if (in->quality == 1) {
214          p->preset_mode = PRESET_MODE_BALANCE;
215          p->pre_encode_mode = PREENCODING_MODE_DEFAULT;
216          p->vbaq_mode = VBAQ_AUTO;
217       } else {
218          p->preset_mode = in->preset_mode;
219          p->pre_encode_mode = in->pre_encode_mode;
220          p->vbaq_mode = in->vbaq_mode;
221       }
222    }
223    p->level = in->quality;
224 }
225 
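/* Dispatch the picture parameter buffer to the codec-specific handler and,
 * on first use, create the decoder now that max_references is known.
 */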
226 static VAStatus
227 handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
228 {
229    VAStatus vaStatus = VA_STATUS_SUCCESS;
230    enum pipe_video_format format =
231       u_reduce_video_profile(context->templat.profile);
232 
233    switch (format) {
234    case PIPE_VIDEO_FORMAT_MPEG12:
235       vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
236       break;
237 
238    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
239       vlVaHandlePictureParameterBufferH264(drv, context, buf);
240       break;
241 
242    case PIPE_VIDEO_FORMAT_VC1:
243       vlVaHandlePictureParameterBufferVC1(drv, context, buf);
244       break;
245 
246    case PIPE_VIDEO_FORMAT_MPEG4:
247       vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
248       break;
249 
250    case PIPE_VIDEO_FORMAT_HEVC:
251       vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
252       break;
253 
254    case PIPE_VIDEO_FORMAT_JPEG:
255       vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
256       break;
257 
258    case PIPE_VIDEO_FORMAT_VP9:
259       vlVaHandlePictureParameterBufferVP9(drv, context, buf);
260       break;
261 
262    case PIPE_VIDEO_FORMAT_AV1:
263       vlVaHandlePictureParameterBufferAV1(drv, context, buf);
264       break;
265 
266    default:
267       break;
268    }
269 
270    /* Create the decoder once max_references is known. */
271    if (!context->decoder) {
272       if (!context->target)
273          return VA_STATUS_ERROR_INVALID_CONTEXT;
274 
275       if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
276          context->templat.level = u_get_h264_level(context->templat.width,
277             context->templat.height, &context->templat.max_references);
278 
279       context->decoder = drv->pipe->create_video_codec(drv->pipe,
280          &context->templat);
281 
282       if (!context->decoder)
283          return VA_STATUS_ERROR_ALLOCATION_FAILED;
284 
285       context->needs_begin_frame = true;
286    }
287 
288    if (format == PIPE_VIDEO_FORMAT_VP9) {
289       context->decoder->width =
290          context->desc.vp9.picture_parameter.frame_width;
291       context->decoder->height =
292          context->desc.vp9.picture_parameter.frame_height;
293    }
294 
295    return vaStatus;
296 }
297 
298 static void
299 handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
300 {
301    switch (u_reduce_video_profile(context->templat.profile)) {
302    case PIPE_VIDEO_FORMAT_MPEG12:
303       vlVaHandleIQMatrixBufferMPEG12(context, buf);
304       break;
305 
306    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
307       vlVaHandleIQMatrixBufferH264(context, buf);
308       break;
309 
310    case PIPE_VIDEO_FORMAT_MPEG4:
311       vlVaHandleIQMatrixBufferMPEG4(context, buf);
312       break;
313 
314    case PIPE_VIDEO_FORMAT_HEVC:
315       vlVaHandleIQMatrixBufferHEVC(context, buf);
316       break;
317 
318    case PIPE_VIDEO_FORMAT_JPEG:
319       vlVaHandleIQMatrixBufferMJPEG(context, buf);
320       break;
321 
322    default:
323       break;
324    }
325 }
326 
327 static void
328 handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf)
329 {
330    switch (u_reduce_video_profile(context->templat.profile)) {
331    case PIPE_VIDEO_FORMAT_MPEG12:
332       vlVaHandleSliceParameterBufferMPEG12(context, buf);
333       break;
334 
335    case PIPE_VIDEO_FORMAT_VC1:
336       vlVaHandleSliceParameterBufferVC1(context, buf);
337       break;
338 
339    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
340       vlVaHandleSliceParameterBufferH264(context, buf);
341       break;
342 
343    case PIPE_VIDEO_FORMAT_MPEG4:
344       vlVaHandleSliceParameterBufferMPEG4(context, buf);
345       break;
346 
347    case PIPE_VIDEO_FORMAT_HEVC:
348       vlVaHandleSliceParameterBufferHEVC(context, buf);
349       break;
350 
351    case PIPE_VIDEO_FORMAT_JPEG:
352       vlVaHandleSliceParameterBufferMJPEG(context, buf);
353       break;
354 
355    case PIPE_VIDEO_FORMAT_VP9:
356       vlVaHandleSliceParameterBufferVP9(context, buf);
357       break;
358 
359    case PIPE_VIDEO_FORMAT_AV1:
360       vlVaHandleSliceParameterBufferAV1(context, buf);
361       break;
362 
363    default:
364       break;
365    }
366 }
367 
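/* Return 1 if the given start code (of 'bits' bits) is found within the
 * first 64 bytes of the buffer.
 */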
368 static unsigned int
369 bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
370 {
371    struct vl_vlc vlc = {0};
372    int i;
373 
374    /* search the first 64 bytes for a startcode */
375    vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
376    for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
377       if (vl_vlc_peekbits(&vlc, bits) == code)
378          return 1;
379       vl_vlc_eatbits(&vlc, 8);
380       vl_vlc_fillbits(&vlc);
381    }
382 
383    return 0;
384 }
385 
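/* Store the decryption key passed for protected playback: resize the key
 * storage, copy the key and flag the session as protected.
 */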
386 static void
387 handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
388 {
389    uint8_t *encrypted_data = (uint8_t *)buf->data;
390    uint8_t *drm_key;
391 
392    unsigned int drm_key_size = buf->size;
393 
394    drm_key = REALLOC(context->desc.base.decrypt_key,
395                      context->desc.base.key_size, drm_key_size);
396    if (!drm_key)
397       return;
398    context->desc.base.decrypt_key = drm_key;
399    memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
400    context->desc.base.key_size = drm_key_size;
401    context->desc.base.protected_playback = true;
402 }
403 
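/* Queue slice data for the decoder: grow the bitstream buffer list, prepend
 * a codec-specific start code (or MPEG-4/JPEG header) when the application
 * did not include one, append the slice data itself plus the JPEG EOI marker,
 * and issue begin_frame() for the first slice of a picture.
 */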
404 static VAStatus
405 handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
406 {
407    enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
408    static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
409    static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
410    static const uint8_t start_code_vc1_frame[] = { 0x00, 0x00, 0x01, 0x0d };
411    static const uint8_t start_code_vc1_field[] = { 0x00, 0x00, 0x01, 0x0c };
412    static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };
413 
414    if (!context->decoder)
415       return VA_STATUS_ERROR_INVALID_CONTEXT;
416 
417    if (context->bs.allocated_size - context->bs.num_buffers < 3) {
418       context->bs.buffers = REALLOC(context->bs.buffers,
419                                     context->bs.allocated_size * sizeof(*context->bs.buffers),
420                                     (context->bs.allocated_size + 3) * sizeof(*context->bs.buffers));
421       context->bs.sizes = REALLOC(context->bs.sizes,
422                                   context->bs.allocated_size * sizeof(*context->bs.sizes),
423                                   (context->bs.allocated_size + 3) * sizeof(*context->bs.sizes));
424       context->bs.allocated_size += 3;
425    }
426 
427    format = u_reduce_video_profile(context->templat.profile);
428    if (!context->desc.base.protected_playback) {
429       switch (format) {
430       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
431          if (bufHasStartcode(buf, 0x000001, 24))
432             break;
433 
434          context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h264;
435          context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h264);
436          break;
437       case PIPE_VIDEO_FORMAT_HEVC:
438          if (bufHasStartcode(buf, 0x000001, 24))
439             break;
440 
441          context->bs.buffers[context->bs.num_buffers] = (void *const)&start_code_h265;
442          context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_h265);
443          break;
444       case PIPE_VIDEO_FORMAT_VC1:
445          if (bufHasStartcode(buf, 0x000001, 24))
446             break;
447 
448          if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
449             const uint8_t *start_code =
450                context->desc.vc1.is_first_field ? start_code_vc1_frame : start_code_vc1_field;
451             context->bs.buffers[context->bs.num_buffers] = (void *const)start_code;
452             context->bs.sizes[context->bs.num_buffers++] = sizeof(start_code_vc1_frame);
453          }
454          break;
455       case PIPE_VIDEO_FORMAT_MPEG4:
456          if (bufHasStartcode(buf, 0x000001, 24))
457             break;
458 
459          vlVaDecoderFixMPEG4Startcode(context);
460          context->bs.buffers[context->bs.num_buffers] = (void *)context->mpeg4.start_code;
461          context->bs.sizes[context->bs.num_buffers++] = context->mpeg4.start_code_size;
462          break;
463       case PIPE_VIDEO_FORMAT_JPEG:
464          if (bufHasStartcode(buf, 0xffd8ffdb, 32))
465             break;
466 
467          vlVaGetJpegSliceHeader(context);
468          context->bs.buffers[context->bs.num_buffers] = (void *)context->mjpeg.slice_header;
469          context->bs.sizes[context->bs.num_buffers++] = context->mjpeg.slice_header_size;
470          break;
471       case PIPE_VIDEO_FORMAT_VP9:
472          if (false == context->desc.base.protected_playback)
473             vlVaDecoderVP9BitstreamHeader(context, buf);
474          break;
475       case PIPE_VIDEO_FORMAT_AV1:
476          break;
477       default:
478          break;
479       }
480    }
481 
482    context->bs.buffers[context->bs.num_buffers] = buf->data;
483    context->bs.sizes[context->bs.num_buffers++] = buf->size;
484 
485    if (format == PIPE_VIDEO_FORMAT_JPEG) {
486       context->bs.buffers[context->bs.num_buffers] = (void *const)&eoi_jpeg;
487       context->bs.sizes[context->bs.num_buffers++] = sizeof(eoi_jpeg);
488    }
489 
490    if (context->needs_begin_frame) {
491       context->decoder->begin_frame(context->decoder, context->target,
492          &context->desc.base);
493       context->needs_begin_frame = false;
494    }
495    return VA_STATUS_SUCCESS;
496 }
497 
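/* The handleVAEnc*() helpers below dispatch each encoder parameter buffer to
 * the codec-specific implementation (H.264, HEVC and, with a new enough
 * libva, AV1).
 */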
498 static VAStatus
499 handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
500 {
501    VAStatus status = VA_STATUS_SUCCESS;
502 
503    switch (u_reduce_video_profile(context->templat.profile)) {
504    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
505       status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
506       break;
507 
508    case PIPE_VIDEO_FORMAT_HEVC:
509       status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
510       break;
511 
512 #if VA_CHECK_VERSION(1, 16, 0)
513    case PIPE_VIDEO_FORMAT_AV1:
514       status = vlVaHandleVAEncMiscParameterTypeRateControlAV1(context, misc);
515       break;
516 #endif
517    default:
518       break;
519    }
520 
521    return status;
522 }
523 
524 static VAStatus
525 handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
526 {
527    VAStatus status = VA_STATUS_SUCCESS;
528 
529    switch (u_reduce_video_profile(context->templat.profile)) {
530    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
531       status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
532       break;
533 
534    case PIPE_VIDEO_FORMAT_HEVC:
535       status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
536       break;
537 
538 #if VA_CHECK_VERSION(1, 16, 0)
539    case PIPE_VIDEO_FORMAT_AV1:
540       status = vlVaHandleVAEncMiscParameterTypeFrameRateAV1(context, misc);
541       break;
542 #endif
543    default:
544       break;
545    }
546 
547    return status;
548 }
549 
550 static VAStatus
551 handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
552 {
553    VAStatus status = VA_STATUS_SUCCESS;
554 
555    switch (u_reduce_video_profile(context->templat.profile)) {
556    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
557       status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
558       break;
559 
560    case PIPE_VIDEO_FORMAT_HEVC:
561       status = vlVaHandleVAEncMiscParameterTypeTemporalLayerHEVC(context, misc);
562       break;
563 
564    default:
565       break;
566    }
567 
568    return status;
569 }
570 
571 static VAStatus
572 handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
573 {
574    VAStatus status = VA_STATUS_SUCCESS;
575 
576    switch (u_reduce_video_profile(context->templat.profile)) {
577    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
578       status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
579       break;
580 
581    case PIPE_VIDEO_FORMAT_HEVC:
582       status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
583       break;
584 
585 #if VA_CHECK_VERSION(1, 16, 0)
586    case PIPE_VIDEO_FORMAT_AV1:
587       status = vlVaHandleVAEncSequenceParameterBufferTypeAV1(drv, context, buf);
588       break;
589 #endif
590 
591    default:
592       break;
593    }
594 
595    return status;
596 }
597 
598 static VAStatus
599 handleVAEncMiscParameterTypeQualityLevel(vlVaContext *context, VAEncMiscParameterBuffer *misc)
600 {
601    VAStatus status = VA_STATUS_SUCCESS;
602 
603    switch (u_reduce_video_profile(context->templat.profile)) {
604    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
605       status = vlVaHandleVAEncMiscParameterTypeQualityLevelH264(context, misc);
606       break;
607 
608    case PIPE_VIDEO_FORMAT_HEVC:
609       status = vlVaHandleVAEncMiscParameterTypeQualityLevelHEVC(context, misc);
610       break;
611 
612 #if VA_CHECK_VERSION(1, 16, 0)
613    case PIPE_VIDEO_FORMAT_AV1:
614       status = vlVaHandleVAEncMiscParameterTypeQualityLevelAV1(context, misc);
615       break;
616 #endif
617 
618    default:
619       break;
620    }
621 
622    return status;
623 }
624 
625 static VAStatus
626 handleVAEncMiscParameterTypeMaxFrameSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
627 {
628    VAStatus status = VA_STATUS_SUCCESS;
629 
630    switch (u_reduce_video_profile(context->templat.profile)) {
631    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
632       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeH264(context, misc);
633       break;
634 
635    case PIPE_VIDEO_FORMAT_HEVC:
636       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeHEVC(context, misc);
637       break;
638 
639 #if VA_CHECK_VERSION(1, 16, 0)
640    case PIPE_VIDEO_FORMAT_AV1:
641       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeAV1(context, misc);
642       break;
643 #endif
644 
645    default:
646       break;
647    }
648 
649    return status;
650 }
651 static VAStatus
652 handleVAEncMiscParameterTypeHRD(vlVaContext *context, VAEncMiscParameterBuffer *misc)
653 {
654    VAStatus status = VA_STATUS_SUCCESS;
655 
656    switch (u_reduce_video_profile(context->templat.profile)) {
657    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
658       status = vlVaHandleVAEncMiscParameterTypeHRDH264(context, misc);
659       break;
660 
661    case PIPE_VIDEO_FORMAT_HEVC:
662       status = vlVaHandleVAEncMiscParameterTypeHRDHEVC(context, misc);
663       break;
664 
665 #if VA_CHECK_VERSION(1, 16, 0)
666    case PIPE_VIDEO_FORMAT_AV1:
667       status = vlVaHandleVAEncMiscParameterTypeHRDAV1(context, misc);
668       break;
669 #endif
670 
671    default:
672       break;
673    }
674 
675    return status;
676 }
677 
678 static VAStatus
679 handleVAEncMiscParameterTypeMaxSliceSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
680 {
681    VAStatus status = VA_STATUS_SUCCESS;
682    VAEncMiscParameterMaxSliceSize *max_slice_size_buffer = (VAEncMiscParameterMaxSliceSize *)misc->data;
683    switch (u_reduce_video_profile(context->templat.profile)) {
684       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
685       {
686          context->desc.h264enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
687          context->desc.h264enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
688       } break;
689       case PIPE_VIDEO_FORMAT_HEVC:
690       {
691          context->desc.h265enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
692          context->desc.h265enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
693       } break;
694       default:
695          break;
696    }
697    return status;
698 }
699 
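/* Translate the VA rolling intra refresh (RIR) parameters into
 * pipe_enc_intra_refresh: row or column mode, region size and offset; an
 * offset of 0 additionally requests sequence level headers.
 */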
700 static VAStatus
701 handleVAEncMiscParameterTypeRIR(vlVaContext *context, VAEncMiscParameterBuffer *misc)
702 {
703    VAStatus status = VA_STATUS_SUCCESS;
704    struct pipe_enc_intra_refresh *p_intra_refresh = NULL;
705 
706    switch (u_reduce_video_profile(context->templat.profile)) {
707       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
708          p_intra_refresh = &context->desc.h264enc.intra_refresh;
709          break;
710       case PIPE_VIDEO_FORMAT_HEVC:
711          p_intra_refresh = &context->desc.h265enc.intra_refresh;
712          break;
713 #if VA_CHECK_VERSION(1, 16, 0)
714       case PIPE_VIDEO_FORMAT_AV1:
715          p_intra_refresh = &context->desc.av1enc.intra_refresh;
716          break;
717 #endif
718       default:
719          return status;
720    };
721 
722    VAEncMiscParameterRIR *ir = (VAEncMiscParameterRIR *)misc->data;
723 
724    if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_ROW)
725       p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_ROWS;
726    else if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_COLUMN)
727       p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
728    else if (ir->rir_flags.value) /* any other value falls back to the default mode */
729       p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
730    else /* if no mode specified then no intra-refresh */
731       p_intra_refresh->mode = INTRA_REFRESH_MODE_NONE;
732 
733    /* intra refresh should be started with sequence level headers */
734    p_intra_refresh->need_sequence_header = 0;
735    if (p_intra_refresh->mode) {
736       p_intra_refresh->region_size = ir->intra_insert_size;
737       p_intra_refresh->offset = ir->intra_insertion_location;
738       if (p_intra_refresh->offset == 0)
739          p_intra_refresh->need_sequence_header = 1;
740    }
741 
742    return status;
743 }
744 
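/* Copy the VA ROI regions into the codec's pipe_enc_roi descriptor. Only
 * QP-delta regions are supported, up to PIPE_ENC_ROI_REGION_NUM_MAX of them.
 */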
745 static VAStatus
746 handleVAEncMiscParameterTypeROI(vlVaContext *context, VAEncMiscParameterBuffer *misc)
747 {
748    VAStatus status = VA_STATUS_SUCCESS;
749    struct pipe_enc_roi *proi= NULL;
750    switch (u_reduce_video_profile(context->templat.profile)) {
751       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
752          proi = &context->desc.h264enc.roi;
753          break;
754       case PIPE_VIDEO_FORMAT_HEVC:
755          proi = &context->desc.h265enc.roi;
756          break;
757 #if VA_CHECK_VERSION(1, 16, 0)
758       case PIPE_VIDEO_FORMAT_AV1:
759          proi = &context->desc.av1enc.roi;
760          break;
761 #endif
762       default:
763          break;
764    };
765 
766    if (proi) {
767       VAEncMiscParameterBufferROI *roi = (VAEncMiscParameterBufferROI *)misc->data;
768       /* Priority-type ROIs are not supported, and the maximum number of regions is 32. */
769       if ((roi->num_roi > 0 && roi->roi_flags.bits.roi_value_is_qp_delta == 0)
770            || roi->num_roi > PIPE_ENC_ROI_REGION_NUM_MAX)
771          status = VA_STATUS_ERROR_FLAG_NOT_SUPPORTED;
772       else {
773          uint32_t i;
774          VAEncROI *src = roi->roi;
775 
776          proi->num = roi->num_roi;
777          for (i = 0; i < roi->num_roi; i++) {
778             proi->region[i].valid = true;
779             proi->region[i].x = src->roi_rectangle.x;
780             proi->region[i].y = src->roi_rectangle.y;
781             proi->region[i].width = src->roi_rectangle.width;
782             proi->region[i].height = src->roi_rectangle.height;
783             proi->region[i].qp_value = (int32_t)CLAMP(src->roi_value, roi->min_delta_qp, roi->max_delta_qp);
784             src++;
785          }
786 
787          for (; i < PIPE_ENC_ROI_REGION_NUM_MAX; i++)
788             proi->region[i].valid = false;
789       }
790    }
791 
792    return status;
793 }
794 
795 static VAStatus
796 handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
797 {
798    VAStatus vaStatus = VA_STATUS_SUCCESS;
799    VAEncMiscParameterBuffer *misc;
800    misc = buf->data;
801 
802    switch (misc->type) {
803    case VAEncMiscParameterTypeRateControl:
804       vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
805       break;
806 
807    case VAEncMiscParameterTypeFrameRate:
808       vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
809       break;
810 
811    case VAEncMiscParameterTypeTemporalLayerStructure:
812       vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
813       break;
814 
815    case VAEncMiscParameterTypeQualityLevel:
816       vaStatus = handleVAEncMiscParameterTypeQualityLevel(context, misc);
817       break;
818 
819    case VAEncMiscParameterTypeMaxFrameSize:
820       vaStatus = handleVAEncMiscParameterTypeMaxFrameSize(context, misc);
821       break;
822 
823    case VAEncMiscParameterTypeHRD:
824       vaStatus = handleVAEncMiscParameterTypeHRD(context, misc);
825       break;
826 
827    case VAEncMiscParameterTypeRIR:
828       vaStatus = handleVAEncMiscParameterTypeRIR(context, misc);
829       break;
830 
831    case VAEncMiscParameterTypeMaxSliceSize:
832       vaStatus = handleVAEncMiscParameterTypeMaxSliceSize(context, misc);
833       break;
834 
835    case VAEncMiscParameterTypeROI:
836       vaStatus = handleVAEncMiscParameterTypeROI(context, misc);
837       break;
838 
839    default:
840       break;
841    }
842 
843    return vaStatus;
844 }
845 
846 static VAStatus
847 handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
848 {
849    VAStatus status = VA_STATUS_SUCCESS;
850 
851    switch (u_reduce_video_profile(context->templat.profile)) {
852    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
853       status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
854       break;
855 
856    case PIPE_VIDEO_FORMAT_HEVC:
857       status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
858       break;
859 
860 #if VA_CHECK_VERSION(1, 16, 0)
861    case PIPE_VIDEO_FORMAT_AV1:
862       status = vlVaHandleVAEncPictureParameterBufferTypeAV1(drv, context, buf);
863       break;
864 #endif
865 
866    default:
867       break;
868    }
869 
870    return status;
871 }
872 
873 static VAStatus
874 handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
875 {
876    VAStatus status = VA_STATUS_SUCCESS;
877 
878    switch (u_reduce_video_profile(context->templat.profile)) {
879    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
880       status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
881       break;
882 
883    case PIPE_VIDEO_FORMAT_HEVC:
884       status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
885       break;
886 
887 #if VA_CHECK_VERSION(1, 16, 0)
888    case PIPE_VIDEO_FORMAT_AV1:
889       status = vlVaHandleVAEncSliceParameterBufferTypeAV1(drv, context, buf);
890       break;
891 #endif
892 
893    default:
894       break;
895    }
896 
897    return status;
898 }
899 
900 static VAStatus
901 handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
902 {
903    VAEncPackedHeaderParameterBuffer *param = buf->data;
904 
905    context->packed_header_emulation_bytes = param->has_emulation_bytes;
906    context->packed_header_type = param->type;
907 
908    return VA_STATUS_SUCCESS;
909 }
910 
911 static VAStatus
912 handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
913 {
914    VAStatus status = VA_STATUS_SUCCESS;
915 
916    switch (u_reduce_video_profile(context->templat.profile)) {
917    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
918       status = vlVaHandleVAEncPackedHeaderDataBufferTypeH264(context, buf);
919       break;
920 
921    case PIPE_VIDEO_FORMAT_HEVC:
922       status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
923       break;
924 
925 #if VA_CHECK_VERSION(1, 16, 0)
926    case PIPE_VIDEO_FORMAT_AV1:
927       status = vlVaHandleVAEncPackedHeaderDataBufferTypeAV1(context, buf);
928       break;
929 #endif
930 
931    default:
932       break;
933    }
934 
935    return status;
936 }
937 
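/* Attach the buffer that will receive encoder statistics, creating its
 * backing pipe resource on first use.
 */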
938 static VAStatus
939 handleVAStatsStatisticsBufferType(VADriverContextP ctx, vlVaContext *context, vlVaBuffer *buf)
940 {
941    if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
942       return VA_STATUS_ERROR_UNIMPLEMENTED;
943 
944    vlVaDriver *drv;
945    drv = VL_VA_DRIVER(ctx);
946 
947    if (!drv)
948       return VA_STATUS_ERROR_INVALID_CONTEXT;
949 
950    if (!buf->derived_surface.resource)
951       buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
952                                             PIPE_USAGE_STREAM, buf->size);
953 
954    context->target->statistics_data = buf->derived_surface.resource;
955 
956    return VA_STATUS_SUCCESS;
957 }
958 
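/* vaRenderPicture(): handle all VAProtectedSliceDataBufferType buffers first
 * (they change the decoder state), then dispatch the remaining buffers by
 * type and finally submit any accumulated bitstream buffers with
 * decode_bitstream().
 */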
959 VAStatus
960 vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
961 {
962    vlVaDriver *drv;
963    vlVaContext *context;
964    VAStatus vaStatus = VA_STATUS_SUCCESS;
965 
966    unsigned i;
967 
968    if (!ctx)
969       return VA_STATUS_ERROR_INVALID_CONTEXT;
970 
971    drv = VL_VA_DRIVER(ctx);
972    if (!drv)
973       return VA_STATUS_ERROR_INVALID_CONTEXT;
974 
975    mtx_lock(&drv->mutex);
976    context = handle_table_get(drv->htab, context_id);
977    if (!context) {
978       mtx_unlock(&drv->mutex);
979       return VA_STATUS_ERROR_INVALID_CONTEXT;
980    }
981 
982    if (!context->target_id) {
983       mtx_unlock(&drv->mutex);
984       return VA_STATUS_ERROR_OPERATION_FAILED;
985    }
986 
987    /* Always process VAProtectedSliceDataBufferType first because it changes the state */
988    for (i = 0; i < num_buffers; ++i) {
989       vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
990       if (!buf) {
991          mtx_unlock(&drv->mutex);
992          return VA_STATUS_ERROR_INVALID_BUFFER;
993       }
994 
995       if (buf->type == VAProtectedSliceDataBufferType)
996          handleVAProtectedSliceDataBufferType(context, buf);
997    }
998 
999    for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
1000       vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
1001       if (!buf) {
1002          mtx_unlock(&drv->mutex);
1003          return VA_STATUS_ERROR_INVALID_BUFFER;
1004       }
1005 
1006       switch (buf->type) {
1007       case VAPictureParameterBufferType:
1008          vaStatus = handlePictureParameterBuffer(drv, context, buf);
1009          break;
1010 
1011       case VAIQMatrixBufferType:
1012          handleIQMatrixBuffer(context, buf);
1013          break;
1014 
1015       case VASliceParameterBufferType:
1016          handleSliceParameterBuffer(context, buf);
1017          context->have_slice_params = true;
1018          break;
1019 
1020       case VASliceDataBufferType:
1021          vaStatus = handleVASliceDataBufferType(context, buf);
1022          /* Workaround for apps sending a single slice data buffer followed
1023           * by multiple slice parameter buffers. */
1024          if (context->have_slice_params)
1025             context->slice_data_offset += buf->size;
1026          break;
1027 
1028       case VAProcPipelineParameterBufferType:
1029          vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
1030          break;
1031 
1032       case VAEncSequenceParameterBufferType:
1033          vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
1034          break;
1035 
1036       case VAEncMiscParameterBufferType:
1037          vaStatus = handleVAEncMiscParameterBufferType(context, buf);
1038          break;
1039 
1040       case VAEncPictureParameterBufferType:
1041          vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
1042          break;
1043 
1044       case VAEncSliceParameterBufferType:
1045          vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
1046          break;
1047 
1048       case VAHuffmanTableBufferType:
1049          vlVaHandleHuffmanTableBufferType(context, buf);
1050          break;
1051 
1052       case VAEncPackedHeaderParameterBufferType:
1053          handleVAEncPackedHeaderParameterBufferType(context, buf);
1054          break;
1055       case VAEncPackedHeaderDataBufferType:
1056          handleVAEncPackedHeaderDataBufferType(context, buf);
1057          break;
1058 
1059       case VAStatsStatisticsBufferType:
1060          handleVAStatsStatisticsBufferType(ctx, context, buf);
1061          break;
1062 
1063       default:
1064          break;
1065       }
1066    }
1067 
1068    if (context->decoder &&
1069        context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM &&
1070        context->bs.num_buffers) {
1071       context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
1072          context->bs.num_buffers, (const void * const*)context->bs.buffers, context->bs.sizes);
1073       context->bs.num_buffers = 0;
1074    }
1075 
1076    mtx_unlock(&drv->mutex);
1077 
1078    return vaStatus;
1079 }
1080 
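/* For AV1 decode with film grain enabled, report the surface id that should
 * receive the film-grain output (current_frame_id) and where to store its
 * buffer (film_grain_target); returns false when film grain does not apply.
 */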
1081 static bool vlVaQueryApplyFilmGrainAV1(vlVaContext *context,
1082                                  int *output_id,
1083                                  struct pipe_video_buffer ***out_target)
1084 {
1085    struct pipe_av1_picture_desc *av1 = NULL;
1086 
1087    if (u_reduce_video_profile(context->templat.profile) != PIPE_VIDEO_FORMAT_AV1 ||
1088        context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
1089       return false;
1090 
1091    av1 = &context->desc.av1;
1092    if (!av1->picture_parameter.film_grain_info.film_grain_info_fields.apply_grain)
1093       return false;
1094 
1095    *output_id = av1->picture_parameter.current_frame_id;
1096    *out_target = &av1->film_grain_target;
1097    return true;
1098 }
1099 
1100 static void vlVaClearRawHeaders(struct util_dynarray *headers)
1101 {
1102    util_dynarray_foreach(headers, struct pipe_enc_raw_header, header)
1103       FREE(header->buffer);
1104    util_dynarray_clear(headers);
1105 }
1106 
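/* vaEndPicture(): finish the current picture. For encode this binds the
 * coded buffer and calls begin_frame()/encode_bitstream(); for decode and
 * video processing it attaches the surface fence. In all cases end_frame()
 * is called, the decoder is flushed if required, and the per-codec encoder
 * frame counters and raw headers are updated.
 */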
1107 VAStatus
1108 vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
1109 {
1110    vlVaDriver *drv;
1111    vlVaContext *context;
1112    vlVaBuffer *coded_buf;
1113    vlVaSurface *surf;
1114    void *feedback = NULL;
1115    struct pipe_screen *screen;
1116    bool apply_av1_fg = false;
1117    struct pipe_video_buffer **out_target;
1118    int output_id;
1119    enum pipe_format target_format;
1120 
1121    if (!ctx)
1122       return VA_STATUS_ERROR_INVALID_CONTEXT;
1123 
1124    drv = VL_VA_DRIVER(ctx);
1125    if (!drv)
1126       return VA_STATUS_ERROR_INVALID_CONTEXT;
1127 
1128    mtx_lock(&drv->mutex);
1129    context = handle_table_get(drv->htab, context_id);
1130    if (!context) {
1131       mtx_unlock(&drv->mutex);
1132       return VA_STATUS_ERROR_INVALID_CONTEXT;
1133    }
1134 
1135    if (!context->target_id) {
1136       mtx_unlock(&drv->mutex);
1137       return VA_STATUS_ERROR_OPERATION_FAILED;
1138    }
1139 
1140    output_id = context->target_id;
1141    context->target_id = 0;
1142 
1143    if (!context->decoder) {
1144       if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN) {
1145          mtx_unlock(&drv->mutex);
1146          return VA_STATUS_ERROR_INVALID_CONTEXT;
1147       }
1148 
1149       /* VPP */
1150       mtx_unlock(&drv->mutex);
1151       return VA_STATUS_SUCCESS;
1152    }
1153 
1154    if (context->needs_begin_frame) {
1155       mtx_unlock(&drv->mutex);
1156       return VA_STATUS_ERROR_OPERATION_FAILED;
1157    }
1158 
1159    out_target = &context->target;
1160    apply_av1_fg = vlVaQueryApplyFilmGrainAV1(context, &output_id, &out_target);
1161 
1162    surf = handle_table_get(drv->htab, output_id);
1163    if (surf && !surf->buffer && context->desc.base.protected_playback)
1164       surf->templat.bind |= PIPE_BIND_PROTECTED;
1165    vlVaGetSurfaceBuffer(drv, surf);
1166    if (!surf || !surf->buffer) {
1167       mtx_unlock(&drv->mutex);
1168       return VA_STATUS_ERROR_INVALID_SURFACE;
1169    }
1170 
1171    if (apply_av1_fg) {
1172       vlVaSetSurfaceContext(drv, surf, context);
1173       *out_target = surf->buffer;
1174    }
1175 
1176    context->mpeg4.frame_num++;
1177 
1178    screen = context->decoder->context->screen;
1179 
1180    if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
1181       mtx_unlock(&drv->mutex);
1182       return VA_STATUS_ERROR_INVALID_SURFACE;
1183    }
1184 
1185    target_format = context->target->buffer_format;
1186 
1187    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
1188       coded_buf = context->coded_buf;
1189       context->desc.base.fence = &coded_buf->fence;
1190       if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1191          context->desc.h264enc.frame_num_cnt++;
1192 
1193       if (surf->efc_surface) {
1194          assert(surf == drv->last_efc_surface);
1195          context->target = surf->efc_surface->buffer;
1196          context->desc.base.input_format = surf->efc_surface->buffer->buffer_format;
1197          context->desc.base.output_format = surf->buffer->buffer_format;
1198          surf->efc_surface = NULL;
1199          drv->last_efc_surface = NULL;
1200       } else {
1201          context->desc.base.input_format = surf->buffer->buffer_format;
1202          context->desc.base.output_format = surf->buffer->buffer_format;
1203       }
1204       context->desc.base.input_full_range = surf->full_range;
1205       target_format = context->desc.base.output_format;
1206 
1207       if (coded_buf->coded_surf)
1208          coded_buf->coded_surf->coded_buf = NULL;
1209       vlVaGetBufferFeedback(coded_buf);
1210       vlVaSetBufferContext(drv, coded_buf, context);
1211 
1212       int driver_metadata_support = drv->pipe->screen->get_video_param(drv->pipe->screen,
1213                                                                        context->decoder->profile,
1214                                                                        context->decoder->entrypoint,
1215                                                                        PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA);
1216       if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1217          context->desc.h264enc.requested_metadata = driver_metadata_support;
1218       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
1219          context->desc.h265enc.requested_metadata = driver_metadata_support;
1220       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
1221          context->desc.av1enc.requested_metadata = driver_metadata_support;
1222 
1223       context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
1224       context->decoder->encode_bitstream(context->decoder, context->target,
1225                                          coded_buf->derived_surface.resource, &feedback);
1226       coded_buf->feedback = feedback;
1227       coded_buf->coded_surf = surf;
1228       surf->coded_buf = coded_buf;
1229    } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
1230       context->desc.base.fence = &surf->fence;
1231    } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING) {
1232       context->desc.base.fence = &surf->fence;
1233    }
1234 
1235    if (screen->is_video_target_buffer_supported &&
1236        !screen->is_video_target_buffer_supported(screen,
1237                                                  target_format,
1238                                                  context->target,
1239                                                  context->decoder->profile,
1240                                                  context->decoder->entrypoint)) {
1241       mtx_unlock(&drv->mutex);
1242       return VA_STATUS_ERROR_INVALID_SURFACE;
1243    }
1244 
1245    /* when there are external handles, we can't set PIPE_FLUSH_ASYNC */
1246    if (context->desc.base.fence)
1247       context->desc.base.flush_flags = drv->has_external_handles ? 0 : PIPE_FLUSH_ASYNC;
1248 
1249    if (context->decoder->end_frame(context->decoder, context->target, &context->desc.base) != 0) {
1250       mtx_unlock(&drv->mutex);
1251       return VA_STATUS_ERROR_OPERATION_FAILED;
1252    }
1253 
1254    if (drv->pipe->screen->get_video_param(drv->pipe->screen,
1255                            context->decoder->profile,
1256                            context->decoder->entrypoint,
1257                            PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
1258       context->decoder->flush(context->decoder);
1259 
1260    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
1261       switch (u_reduce_video_profile(context->templat.profile)) {
1262       case PIPE_VIDEO_FORMAT_AV1:
1263          context->desc.av1enc.frame_num++;
1264          vlVaClearRawHeaders(&context->desc.av1enc.raw_headers);
1265          break;
1266       case PIPE_VIDEO_FORMAT_HEVC:
1267          context->desc.h265enc.frame_num++;
1268          vlVaClearRawHeaders(&context->desc.h265enc.raw_headers);
1269          break;
1270       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
1271          if (!context->desc.h264enc.not_referenced)
1272             context->desc.h264enc.frame_num++;
1273          vlVaClearRawHeaders(&context->desc.h264enc.raw_headers);
1274          break;
1275       default:
1276          break;
1277       }
1278    }
1279 
1280    mtx_unlock(&drv->mutex);
1281    return VA_STATUS_SUCCESS;
1282 }
1283 
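/* Append a packed header to the raw header list. When emulation_bytes_start
 * is non-zero the payload is copied with emulation prevention: a 0x03 byte is
 * inserted after two consecutive zero bytes whenever the next byte is <= 0x03
 * (the copy is over-allocated to size * 3 / 2 to leave room for the inserted
 * bytes). Otherwise the header is copied verbatim.
 */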
1284 void
1285 vlVaAddRawHeader(struct util_dynarray *headers, uint8_t type, uint32_t size,
1286                  uint8_t *buf, bool is_slice, uint32_t emulation_bytes_start)
1287 {
1288    struct pipe_enc_raw_header header = {
1289       .type = type,
1290       .is_slice = is_slice,
1291    };
1292    if (emulation_bytes_start) {
1293       uint32_t pos = emulation_bytes_start, num_zeros = 0;
1294       header.buffer = MALLOC(size * 3 / 2);
1295       memcpy(header.buffer, buf, emulation_bytes_start);
1296       for (uint32_t i = emulation_bytes_start; i < size; i++) {
1297          uint8_t byte = buf[i];
1298          if (num_zeros >= 2 && byte <= 0x03) {
1299             header.buffer[pos++] = 0x03;
1300             num_zeros = 0;
1301          }
1302          header.buffer[pos++] = byte;
1303          num_zeros = byte == 0x00 ? num_zeros + 1 : 0;
1304       }
1305       header.size = pos;
1306    } else {
1307       header.size = size;
1308       header.buffer = MALLOC(header.size);
1309       memcpy(header.buffer, buf, size);
1310    }
1311    util_dynarray_append(headers, struct pipe_enc_raw_header, header);
1312 }
1313 }