1 /**************************************************************************
2  *
3  * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
4  * Copyright 2014 Advanced Micro Devices, Inc.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the
9  * "Software"), to deal in the Software without restriction, including
10  * without limitation the rights to use, copy, modify, merge, publish,
11  * distribute, sub license, and/or sell copies of the Software, and to
12  * permit persons to whom the Software is furnished to do so, subject to
13  * the following conditions:
14  *
15  * The above copyright notice and this permission notice (including the
16  * next paragraph) shall be included in all copies or substantial portions
17  * of the Software.
18  *
19  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
20  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
21  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
22  * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
23  * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
24  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
25  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26  *
27  **************************************************************************/
28 
29 #include "pipe/p_video_codec.h"
30 
31 #include "util/u_handle_table.h"
32 #include "util/u_video.h"
33 #include "util/u_memory.h"
34 #include "util/set.h"
35 
36 #include "util/vl_vlc.h"
37 #include "vl/vl_winsys.h"
38 
39 #include "va_private.h"
40 
41 static void
42 vlVaSetSurfaceContext(vlVaDriver *drv, vlVaSurface *surf, vlVaContext *context)
43 {
44    if (surf->ctx == context)
45       return;
46 
47    if (surf->ctx) {
48       assert(_mesa_set_search(surf->ctx->surfaces, surf));
49       _mesa_set_remove_key(surf->ctx->surfaces, surf);
50 
51       /* Only drivers supporting PIPE_VIDEO_ENTRYPOINT_PROCESSING will create
52        * a decoder for the postproc context and thus be able to wait on and
53        * destroy the surface fence. On other drivers we need to destroy the
54        * fence here, otherwise vaQuerySurfaceStatus/vaSyncSurface will fail
55        * and we'll also potentially leak the fence.
56        */
57       if (surf->fence && !context->decoder &&
58           context->templat.entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
59           surf->ctx->decoder && surf->ctx->decoder->destroy_fence &&
60           !drv->pipe->screen->get_video_param(drv->pipe->screen,
61                                               PIPE_VIDEO_PROFILE_UNKNOWN,
62                                               PIPE_VIDEO_ENTRYPOINT_PROCESSING,
63                                               PIPE_VIDEO_CAP_SUPPORTED)) {
64          surf->ctx->decoder->destroy_fence(surf->ctx->decoder, surf->fence);
65          surf->fence = NULL;
66       }
67    }
68 
69    surf->ctx = context;
70    _mesa_set_add(surf->ctx->surfaces, surf);
71 }
72 
73 VAStatus
74 vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
75 {
76    vlVaDriver *drv;
77    vlVaContext *context;
78    vlVaSurface *surf;
79 
80    if (!ctx)
81       return VA_STATUS_ERROR_INVALID_CONTEXT;
82 
83    drv = VL_VA_DRIVER(ctx);
84    if (!drv)
85       return VA_STATUS_ERROR_INVALID_CONTEXT;
86 
87    mtx_lock(&drv->mutex);
88    context = handle_table_get(drv->htab, context_id);
89    if (!context) {
90       mtx_unlock(&drv->mutex);
91       return VA_STATUS_ERROR_INVALID_CONTEXT;
92    }
93 
94    if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
95       context->desc.mpeg12.intra_matrix = NULL;
96       context->desc.mpeg12.non_intra_matrix = NULL;
97    }
98 
99    surf = handle_table_get(drv->htab, render_target);
100    if (!surf || !surf->buffer) {
101       mtx_unlock(&drv->mutex);
102       return VA_STATUS_ERROR_INVALID_SURFACE;
103    }
104 
105    context->target_id = render_target;
106    vlVaSetSurfaceContext(drv, surf, context);
107    context->target = surf->buffer;
108    context->mjpeg.sampling_factor = 0;
109 
110    if (!context->decoder) {
111 
112       /* VPP */
113       if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
114           context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
115           context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
116           context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
117           context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM &&
118           context->target->buffer_format != PIPE_FORMAT_NV12 &&
119           context->target->buffer_format != PIPE_FORMAT_P010 &&
120           context->target->buffer_format != PIPE_FORMAT_P016) {
121          mtx_unlock(&drv->mutex);
122          return VA_STATUS_ERROR_UNIMPLEMENTED;
123       }
124 
125       if (drv->pipe->screen->get_video_param(drv->pipe->screen,
126                               PIPE_VIDEO_PROFILE_UNKNOWN,
127                               PIPE_VIDEO_ENTRYPOINT_PROCESSING,
128                               PIPE_VIDEO_CAP_SUPPORTED)) {
129          context->needs_begin_frame = true;
130       }
131 
132       mtx_unlock(&drv->mutex);
133       return VA_STATUS_SUCCESS;
134    }
135 
136    if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
137       context->needs_begin_frame = true;
138 
139    mtx_unlock(&drv->mutex);
140    return VA_STATUS_SUCCESS;
141 }
142 
143 void
144 vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
145                       struct pipe_video_buffer **ref_frame)
146 {
147    vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
148    if (surf)
149       *ref_frame = surf->buffer;
150    else
151       *ref_frame = NULL;
152 }
153 /*
154  * in->quality = 0; without any settings, the speed preset is used,
155  *                  with no pre-encoding and no vbaq. It is the fastest setting.
156  * in->quality = 1; the suggested setting: balanced preset, with
157  *                  pre-encoding and vbaq
158  * in->quality = others; a customized setting, with the valid bit
159  *                  (bit #0) set to "1"
160  *                  for example:
161  *
162  *                  0x3  (balanced preset, no pre-encoding, no vbaq)
163  *                  0x13 (balanced preset, no pre-encoding, vbaq)
165  *                  0x9  (speed preset, pre-encoding, no vbaq)
166  *                  0x19 (speed preset, pre-encoding, vbaq)
167  *
168  *                  The quality value has to be treated as a combination
169  *                  of preset mode, pre-encoding and vbaq settings.
170  *                  The quality and speed may vary according to the
171  *                  different settings.
172  */
173 void
174 vlVaHandleVAEncMiscParameterTypeQualityLevel(struct pipe_enc_quality_modes *p, vlVaQualityBits *in)
175 {
176    if (!in->quality) {
177       p->level = 0;
178       p->preset_mode = PRESET_MODE_SPEED;
179       p->pre_encode_mode = PREENCODING_MODE_DISABLE;
180       p->vbaq_mode = VBAQ_DISABLE;
181 
182       return;
183    }
184 
185    if (p->level != in->quality) {
186       if (in->quality == 1) {
187          p->preset_mode = PRESET_MODE_BALANCE;
188          p->pre_encode_mode = PREENCODING_MODE_DEFAULT;
189          p->vbaq_mode = VBAQ_AUTO;
190       } else {
191          p->preset_mode = in->preset_mode > PRESET_MODE_HIGH_QUALITY
192             ? PRESET_MODE_HIGH_QUALITY : in->preset_mode;
193          p->pre_encode_mode = in->pre_encode_mode;
194          p->vbaq_mode = in->vbaq_mode;
195       }
196    }
197    p->level = in->quality;
198 }
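
/* Illustrative sketch, not driver code: assuming the vlVaQualityBits layout
 * implied by the examples above (bit 0 = valid, bits 1-2 = preset mode,
 * bit 3 = pre-encoding, bit 4 = vbaq), an application could compose a
 * customized value like
 *
 *    uint32_t quality = 0x1        // valid bit
 *                     | (1 << 1)   // balanced preset
 *                     | (1 << 4);  // enable vbaq
 *
 * i.e. 0x13, and pass it through
 * VAEncMiscParameterBufferQualityLevel::quality_level; it arrives in this
 * handler as in->quality.
 */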
199 
200 static VAStatus
201 handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
202 {
203    VAStatus vaStatus = VA_STATUS_SUCCESS;
204    enum pipe_video_format format =
205       u_reduce_video_profile(context->templat.profile);
206 
207    switch (format) {
208    case PIPE_VIDEO_FORMAT_MPEG12:
209       vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
210       break;
211 
212    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
213       vlVaHandlePictureParameterBufferH264(drv, context, buf);
214       break;
215 
216    case PIPE_VIDEO_FORMAT_VC1:
217       vlVaHandlePictureParameterBufferVC1(drv, context, buf);
218       break;
219 
220    case PIPE_VIDEO_FORMAT_MPEG4:
221       vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
222       break;
223 
224    case PIPE_VIDEO_FORMAT_HEVC:
225       vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
226       break;
227 
228    case PIPE_VIDEO_FORMAT_JPEG:
229       vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
230       break;
231 
232    case PIPE_VIDEO_FORMAT_VP9:
233       vlVaHandlePictureParameterBufferVP9(drv, context, buf);
234       break;
235 
236    case PIPE_VIDEO_FORMAT_AV1:
237       vlVaHandlePictureParameterBufferAV1(drv, context, buf);
238       break;
239 
240    default:
241       break;
242    }
243 
244    /* Create the decoder once max_references is known. */
245    if (!context->decoder) {
246       if (!context->target)
247          return VA_STATUS_ERROR_INVALID_CONTEXT;
248 
249       if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
250          context->templat.level = u_get_h264_level(context->templat.width,
251             context->templat.height, &context->templat.max_references);
252 
253       context->decoder = drv->pipe->create_video_codec(drv->pipe,
254          &context->templat);
255 
256       if (!context->decoder)
257          return VA_STATUS_ERROR_ALLOCATION_FAILED;
258 
259       context->needs_begin_frame = true;
260    }
261 
262    if (format == PIPE_VIDEO_FORMAT_VP9) {
263       context->decoder->width =
264          context->desc.vp9.picture_parameter.frame_width;
265       context->decoder->height =
266          context->desc.vp9.picture_parameter.frame_height;
267    }
268 
269    return vaStatus;
270 }
271 
272 static void
273 handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
274 {
275    switch (u_reduce_video_profile(context->templat.profile)) {
276    case PIPE_VIDEO_FORMAT_MPEG12:
277       vlVaHandleIQMatrixBufferMPEG12(context, buf);
278       break;
279 
280    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
281       vlVaHandleIQMatrixBufferH264(context, buf);
282       break;
283 
284    case PIPE_VIDEO_FORMAT_MPEG4:
285       vlVaHandleIQMatrixBufferMPEG4(context, buf);
286       break;
287 
288    case PIPE_VIDEO_FORMAT_HEVC:
289       vlVaHandleIQMatrixBufferHEVC(context, buf);
290       break;
291 
292    case PIPE_VIDEO_FORMAT_JPEG:
293       vlVaHandleIQMatrixBufferMJPEG(context, buf);
294       break;
295 
296    default:
297       break;
298    }
299 }
300 
301 static void
302 handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf, unsigned num_slices)
303 {
304    switch (u_reduce_video_profile(context->templat.profile)) {
305    case PIPE_VIDEO_FORMAT_MPEG12:
306       vlVaHandleSliceParameterBufferMPEG12(context, buf);
307       break;
308 
309    case PIPE_VIDEO_FORMAT_VC1:
310       vlVaHandleSliceParameterBufferVC1(context, buf);
311       break;
312 
313    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
314       vlVaHandleSliceParameterBufferH264(context, buf);
315       break;
316 
317    case PIPE_VIDEO_FORMAT_MPEG4:
318       vlVaHandleSliceParameterBufferMPEG4(context, buf);
319       break;
320 
321    case PIPE_VIDEO_FORMAT_HEVC:
322       vlVaHandleSliceParameterBufferHEVC(context, buf);
323       break;
324 
325    case PIPE_VIDEO_FORMAT_JPEG:
326       vlVaHandleSliceParameterBufferMJPEG(context, buf);
327       break;
328 
329    case PIPE_VIDEO_FORMAT_VP9:
330       vlVaHandleSliceParameterBufferVP9(context, buf);
331       break;
332 
333    case PIPE_VIDEO_FORMAT_AV1:
334       vlVaHandleSliceParameterBufferAV1(context, buf, num_slices);
335       break;
336 
337    default:
338       break;
339    }
340 }
341 
342 static unsigned int
343 bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
344 {
345    struct vl_vlc vlc = {0};
346    int i;
347 
348    /* search the first 64 bytes for a startcode */
349    vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
350    for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
351       if (vl_vlc_peekbits(&vlc, bits) == code)
352          return 1;
353       vl_vlc_eatbits(&vlc, 8);
354       vl_vlc_fillbits(&vlc);
355    }
356 
357    return 0;
358 }
359 
360 static void
361 handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
362 {
363    uint8_t *encrypted_data = (uint8_t *)buf->data;
364    uint8_t *drm_key;
365 
366    unsigned int drm_key_size = buf->size;
367 
368    drm_key = REALLOC(context->desc.base.decrypt_key,
369                      context->desc.base.key_size, drm_key_size);
370    if (!drm_key)
371       return;
372    context->desc.base.decrypt_key = drm_key;
373    memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
374    context->desc.base.key_size = drm_key_size;
375    context->desc.base.protected_playback = true;
376 }
377 
378 static VAStatus
379 handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
380 {
381    enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
382    unsigned num_buffers = 0;
383    void * const *buffers[3];
384    unsigned sizes[3];
385    static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
386    static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
387    static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
388    static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };
389 
390    if (!context->decoder)
391       return VA_STATUS_ERROR_INVALID_CONTEXT;
392 
393    format = u_reduce_video_profile(context->templat.profile);
394    if (!context->desc.base.protected_playback) {
395       switch (format) {
396       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
397          if (bufHasStartcode(buf, 0x000001, 24))
398             break;
399 
400          buffers[num_buffers] = (void *const)&start_code_h264;
401          sizes[num_buffers++] = sizeof(start_code_h264);
402          break;
403       case PIPE_VIDEO_FORMAT_HEVC:
404          if (bufHasStartcode(buf, 0x000001, 24))
405             break;
406 
407          buffers[num_buffers] = (void *const)&start_code_h265;
408          sizes[num_buffers++] = sizeof(start_code_h265);
409          break;
410       case PIPE_VIDEO_FORMAT_VC1:
411          if (bufHasStartcode(buf, 0x0000010d, 32) ||
412              bufHasStartcode(buf, 0x0000010c, 32) ||
413              bufHasStartcode(buf, 0x0000010b, 32))
414             break;
415 
416          if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
417             buffers[num_buffers] = (void *const)&start_code_vc1;
418             sizes[num_buffers++] = sizeof(start_code_vc1);
419          }
420          break;
421       case PIPE_VIDEO_FORMAT_MPEG4:
422          if (bufHasStartcode(buf, 0x000001, 24))
423             break;
424 
425          vlVaDecoderFixMPEG4Startcode(context);
426          buffers[num_buffers] = (void *)context->mpeg4.start_code;
427          sizes[num_buffers++] = context->mpeg4.start_code_size;
428          break;
429       case PIPE_VIDEO_FORMAT_JPEG:
430          if (bufHasStartcode(buf, 0xffd8ffdb, 32))
431             break;
432 
433          vlVaGetJpegSliceHeader(context);
434          buffers[num_buffers] = (void *)context->mjpeg.slice_header;
435          sizes[num_buffers++] = context->mjpeg.slice_header_size;
436          break;
437       case PIPE_VIDEO_FORMAT_VP9:
438          if (false == context->desc.base.protected_playback)
439             vlVaDecoderVP9BitstreamHeader(context, buf);
440          break;
441       case PIPE_VIDEO_FORMAT_AV1:
442          break;
443       default:
444          break;
445       }
446    }
447 
448    buffers[num_buffers] = buf->data;
449    sizes[num_buffers] = buf->size;
450    ++num_buffers;
451 
452    if (format == PIPE_VIDEO_FORMAT_JPEG) {
453       buffers[num_buffers] = (void *const)&eoi_jpeg;
454       sizes[num_buffers++] = sizeof(eoi_jpeg);
455    }
456 
457    if (context->needs_begin_frame) {
458       context->decoder->begin_frame(context->decoder, context->target,
459          &context->desc.base);
460       context->needs_begin_frame = false;
461    }
462    context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
463       num_buffers, (const void * const*)buffers, sizes);
464    return VA_STATUS_SUCCESS;
465 }
466 
467 static VAStatus
468 handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
469 {
470    VAStatus status = VA_STATUS_SUCCESS;
471 
472    switch (u_reduce_video_profile(context->templat.profile)) {
473    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
474       status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
475       break;
476 
477    case PIPE_VIDEO_FORMAT_HEVC:
478       status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
479       break;
480 
481 #if VA_CHECK_VERSION(1, 16, 0)
482    case PIPE_VIDEO_FORMAT_AV1:
483       status = vlVaHandleVAEncMiscParameterTypeRateControlAV1(context, misc);
484       break;
485 #endif
486    default:
487       break;
488    }
489 
490    return status;
491 }
492 
493 static VAStatus
494 handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
495 {
496    VAStatus status = VA_STATUS_SUCCESS;
497 
498    switch (u_reduce_video_profile(context->templat.profile)) {
499    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
500       status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
501       break;
502 
503    case PIPE_VIDEO_FORMAT_HEVC:
504       status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
505       break;
506 
507 #if VA_CHECK_VERSION(1, 16, 0)
508    case PIPE_VIDEO_FORMAT_AV1:
509       status = vlVaHandleVAEncMiscParameterTypeFrameRateAV1(context, misc);
510       break;
511 #endif
512    default:
513       break;
514    }
515 
516    return status;
517 }
518 
519 static VAStatus
520 handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
521 {
522    VAStatus status = VA_STATUS_SUCCESS;
523 
524    switch (u_reduce_video_profile(context->templat.profile)) {
525    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
526       status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
527       break;
528 
529    case PIPE_VIDEO_FORMAT_HEVC:
530       break;
531 
532    default:
533       break;
534    }
535 
536    return status;
537 }
538 
539 static VAStatus
540 handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
541 {
542    VAStatus status = VA_STATUS_SUCCESS;
543 
544    switch (u_reduce_video_profile(context->templat.profile)) {
545    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
546       status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
547       break;
548 
549    case PIPE_VIDEO_FORMAT_HEVC:
550       status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
551       break;
552 
553 #if VA_CHECK_VERSION(1, 16, 0)
554    case PIPE_VIDEO_FORMAT_AV1:
555       status = vlVaHandleVAEncSequenceParameterBufferTypeAV1(drv, context, buf);
556       break;
557 #endif
558 
559    default:
560       break;
561    }
562 
563    return status;
564 }
565 
566 static VAStatus
567 handleVAEncMiscParameterTypeQualityLevel(vlVaContext *context, VAEncMiscParameterBuffer *misc)
568 {
569    VAStatus status = VA_STATUS_SUCCESS;
570 
571    switch (u_reduce_video_profile(context->templat.profile)) {
572    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
573       status = vlVaHandleVAEncMiscParameterTypeQualityLevelH264(context, misc);
574       break;
575 
576    case PIPE_VIDEO_FORMAT_HEVC:
577       status = vlVaHandleVAEncMiscParameterTypeQualityLevelHEVC(context, misc);
578       break;
579 
580 #if VA_CHECK_VERSION(1, 16, 0)
581    case PIPE_VIDEO_FORMAT_AV1:
582       status = vlVaHandleVAEncMiscParameterTypeQualityLevelAV1(context, misc);
583       break;
584 #endif
585 
586    default:
587       break;
588    }
589 
590    return status;
591 }
592 
593 static VAStatus
594 handleVAEncMiscParameterTypeMaxFrameSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
595 {
596    VAStatus status = VA_STATUS_SUCCESS;
597 
598    switch (u_reduce_video_profile(context->templat.profile)) {
599    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
600       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeH264(context, misc);
601       break;
602 
603    case PIPE_VIDEO_FORMAT_HEVC:
604       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeHEVC(context, misc);
605       break;
606 
607 #if VA_CHECK_VERSION(1, 16, 0)
608    case PIPE_VIDEO_FORMAT_AV1:
609       status = vlVaHandleVAEncMiscParameterTypeMaxFrameSizeAV1(context, misc);
610       break;
611 #endif
612 
613    default:
614       break;
615    }
616 
617    return status;
618 }
619 static VAStatus
620 handleVAEncMiscParameterTypeHRD(vlVaContext *context, VAEncMiscParameterBuffer *misc)
621 {
622    VAStatus status = VA_STATUS_SUCCESS;
623 
624    switch (u_reduce_video_profile(context->templat.profile)) {
625    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
626       status = vlVaHandleVAEncMiscParameterTypeHRDH264(context, misc);
627       break;
628 
629    case PIPE_VIDEO_FORMAT_HEVC:
630       status = vlVaHandleVAEncMiscParameterTypeHRDHEVC(context, misc);
631       break;
632 
633 #if VA_CHECK_VERSION(1, 16, 0)
634    case PIPE_VIDEO_FORMAT_AV1:
635       status = vlVaHandleVAEncMiscParameterTypeHRDAV1(context, misc);
636       break;
637 #endif
638 
639    default:
640       break;
641    }
642 
643    return status;
644 }
645 
646 static VAStatus
647 handleVAEncMiscParameterTypeMaxSliceSize(vlVaContext *context, VAEncMiscParameterBuffer *misc)
648 {
649    VAStatus status = VA_STATUS_SUCCESS;
650    VAEncMiscParameterMaxSliceSize *max_slice_size_buffer = (VAEncMiscParameterMaxSliceSize *)misc->data;
651    switch (u_reduce_video_profile(context->templat.profile)) {
652       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
653       {
654          context->desc.h264enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
655          context->desc.h264enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
656       } break;
657       case PIPE_VIDEO_FORMAT_HEVC:
658       {
659          context->desc.h265enc.slice_mode = PIPE_VIDEO_SLICE_MODE_MAX_SLICE_SIZE;
660          context->desc.h265enc.max_slice_bytes = max_slice_size_buffer->max_slice_size;
661       } break;
662       default:
663          break;
664    }
665    return status;
666 }
667 
668 static VAStatus
669 handleVAEncMiscParameterTypeRIR(vlVaContext *context, VAEncMiscParameterBuffer *misc)
670 {
671    VAStatus status = VA_STATUS_SUCCESS;
672    struct pipe_enc_intra_refresh *p_intra_refresh = NULL;
673 
674    switch (u_reduce_video_profile(context->templat.profile)) {
675       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
676          p_intra_refresh = &context->desc.h264enc.intra_refresh;
677          break;
678       case PIPE_VIDEO_FORMAT_HEVC:
679          p_intra_refresh = &context->desc.h265enc.intra_refresh;
680          break;
681 #if VA_CHECK_VERSION(1, 16, 0)
682       case PIPE_VIDEO_FORMAT_AV1:
683          p_intra_refresh = &context->desc.av1enc.intra_refresh;
684          break;
685 #endif
686       default:
687          p_intra_refresh = NULL;
688          break;
689    };
690 
691    if (p_intra_refresh) {
692       VAEncMiscParameterRIR *ir = (VAEncMiscParameterRIR *)misc->data;
693 
694       if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_ROW)
695          p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_ROWS;
696       else if (ir->rir_flags.value == VA_ENC_INTRA_REFRESH_ROLLING_COLUMN)
697          p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
698          else if (ir->rir_flags.value) /* any other value falls back to the default mode */
699             p_intra_refresh->mode = INTRA_REFRESH_MODE_UNIT_COLUMNS;
700          else /* if no mode is specified, there is no intra-refresh */
701          p_intra_refresh->mode = INTRA_REFRESH_MODE_NONE;
702 
703       /* intra refresh should be started with sequence level headers */
704       p_intra_refresh->need_sequence_header = 0;
705       if (p_intra_refresh->mode) {
706          p_intra_refresh->region_size = ir->intra_insert_size;
707          p_intra_refresh->offset = ir->intra_insertion_location;
708          if (p_intra_refresh->offset == 0)
709             p_intra_refresh->need_sequence_header = 1;
710       }
711    }
712    /* p_intra_refresh is NULL here for codecs without intra-refresh support,
713     * so there is nothing to reset. */
717 
718    return status;
719 }
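
/* Illustrative sketch, not driver code: a client requesting rolling-row intra
 * refresh would fill the standard VAEncMiscParameterRIR payload roughly as
 * follows and submit it with type VAEncMiscParameterTypeRIR:
 *
 *    VAEncMiscParameterRIR rir = {0};
 *    rir.rir_flags.value = VA_ENC_INTRA_REFRESH_ROLLING_ROW;
 *    rir.intra_insert_size = 4;        // refresh 4 rows per frame
 *    rir.intra_insertion_location = 0; // start at the top of the frame
 *
 * Since intra_insertion_location is 0, the handler above also requests new
 * sequence-level headers (need_sequence_header = 1).
 */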
720 
721 static VAStatus
722 handleVAEncMiscParameterTypeROI(vlVaContext *context, VAEncMiscParameterBuffer *misc)
723 {
724    VAStatus status = VA_STATUS_SUCCESS;
725    struct pipe_enc_roi *proi= NULL;
726    switch (u_reduce_video_profile(context->templat.profile)) {
727       case PIPE_VIDEO_FORMAT_MPEG4_AVC:
728          proi = &context->desc.h264enc.roi;
729          break;
730       case PIPE_VIDEO_FORMAT_HEVC:
731          proi = &context->desc.h265enc.roi;
732          break;
733 #if VA_CHECK_VERSION(1, 16, 0)
734       case PIPE_VIDEO_FORMAT_AV1:
735          proi = &context->desc.av1enc.roi;
736          break;
737 #endif
738       default:
739          break;
740    };
741 
742    if (proi) {
743       VAEncMiscParameterBufferROI *roi = (VAEncMiscParameterBufferROI *)misc->data;
744       /* priority type is not supported, and the maximum number of regions is 32 */
745       if ((roi->num_roi > 0 && roi->roi_flags.bits.roi_value_is_qp_delta == 0)
746            || roi->num_roi > PIPE_ENC_ROI_REGION_NUM_MAX)
747          status = VA_STATUS_ERROR_FLAG_NOT_SUPPORTED;
748       else {
749          uint32_t i;
750          VAEncROI *src = roi->roi;
751 
752          proi->num = roi->num_roi;
753          for (i = 0; i < roi->num_roi; i++) {
754             proi->region[i].valid = true;
755             proi->region[i].x = src->roi_rectangle.x;
756             proi->region[i].y = src->roi_rectangle.y;
757             proi->region[i].width = src->roi_rectangle.width;
758             proi->region[i].height = src->roi_rectangle.height;
759             proi->region[i].qp_value = (int32_t)CLAMP(src->roi_value, roi->min_delta_qp, roi->max_delta_qp);
760             src++;
761          }
762 
763          for (; i < PIPE_ENC_ROI_REGION_NUM_MAX; i++)
764             proi->region[i].valid = false;
765       }
766    }
767 
768    return status;
769 }
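
/* Illustrative sketch, not driver code: only QP-delta ROIs are accepted above,
 * so a client would describe a region roughly like this and submit it with
 * type VAEncMiscParameterTypeROI:
 *
 *    VAEncROI region = {0};
 *    region.roi_rectangle.x = 0;
 *    region.roi_rectangle.y = 0;
 *    region.roi_rectangle.width = 320;
 *    region.roi_rectangle.height = 240;
 *    region.roi_value = -5;            // QP delta for this region
 *
 *    VAEncMiscParameterBufferROI roi = {0};
 *    roi.num_roi = 1;
 *    roi.min_delta_qp = -10;
 *    roi.max_delta_qp = 10;
 *    roi.roi = &region;
 *    roi.roi_flags.bits.roi_value_is_qp_delta = 1;
 *
 * The handler clamps each roi_value to [min_delta_qp, max_delta_qp] and marks
 * at most PIPE_ENC_ROI_REGION_NUM_MAX regions as valid.
 */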
770 
771 static VAStatus
772 handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
773 {
774    VAStatus vaStatus = VA_STATUS_SUCCESS;
775    VAEncMiscParameterBuffer *misc;
776    misc = buf->data;
777 
778    switch (misc->type) {
779    case VAEncMiscParameterTypeRateControl:
780       vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
781       break;
782 
783    case VAEncMiscParameterTypeFrameRate:
784       vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
785       break;
786 
787    case VAEncMiscParameterTypeTemporalLayerStructure:
788       vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
789       break;
790 
791    case VAEncMiscParameterTypeQualityLevel:
792       vaStatus = handleVAEncMiscParameterTypeQualityLevel(context, misc);
793       break;
794 
795    case VAEncMiscParameterTypeMaxFrameSize:
796       vaStatus = handleVAEncMiscParameterTypeMaxFrameSize(context, misc);
797       break;
798 
799    case VAEncMiscParameterTypeHRD:
800       vaStatus = handleVAEncMiscParameterTypeHRD(context, misc);
801       break;
802 
803    case VAEncMiscParameterTypeRIR:
804       vaStatus = handleVAEncMiscParameterTypeRIR(context, misc);
805       break;
806 
807    case VAEncMiscParameterTypeMaxSliceSize:
808       vaStatus = handleVAEncMiscParameterTypeMaxSliceSize(context, misc);
809       break;
810 
811    case VAEncMiscParameterTypeROI:
812       vaStatus = handleVAEncMiscParameterTypeROI(context, misc);
813       break;
814 
815    default:
816       break;
817    }
818 
819    return vaStatus;
820 }
821 
822 static VAStatus
823 handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
824 {
825    VAStatus status = VA_STATUS_SUCCESS;
826 
827    switch (u_reduce_video_profile(context->templat.profile)) {
828    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
829       status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
830       break;
831 
832    case PIPE_VIDEO_FORMAT_HEVC:
833       status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
834       break;
835 
836 #if VA_CHECK_VERSION(1, 16, 0)
837    case PIPE_VIDEO_FORMAT_AV1:
838       status = vlVaHandleVAEncPictureParameterBufferTypeAV1(drv, context, buf);
839       break;
840 #endif
841 
842    default:
843       break;
844    }
845 
846    return status;
847 }
848 
849 static VAStatus
850 handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
851 {
852    VAStatus status = VA_STATUS_SUCCESS;
853 
854    switch (u_reduce_video_profile(context->templat.profile)) {
855    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
856       status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
857       break;
858 
859    case PIPE_VIDEO_FORMAT_HEVC:
860       status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
861       break;
862 
863 #if VA_CHECK_VERSION(1, 16, 0)
864    case PIPE_VIDEO_FORMAT_AV1:
865       status = vlVaHandleVAEncSliceParameterBufferTypeAV1(drv, context, buf);
866       break;
867 #endif
868 
869    default:
870       break;
871    }
872 
873    return status;
874 }
875 
876 static VAStatus
877 handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
878 {
879    VAStatus status = VA_STATUS_SUCCESS;
880    VAEncPackedHeaderParameterBuffer *param = buf->data;
881 
882    context->packed_header_emulation_bytes = param->has_emulation_bytes;
883 
884    switch (u_reduce_video_profile(context->templat.profile)) {
885    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
886       if (param->type == VAEncPackedHeaderSequence)
887          context->packed_header_type = param->type;
888       else
889          status = VA_STATUS_ERROR_UNIMPLEMENTED;
890       break;
891    case PIPE_VIDEO_FORMAT_HEVC:
892       if (param->type == VAEncPackedHeaderSequence)
893          context->packed_header_type = param->type;
894       else
895          status = VA_STATUS_ERROR_UNIMPLEMENTED;
896       break;
897    case PIPE_VIDEO_FORMAT_AV1:
898       context->packed_header_type = param->type;
899       break;
900 
901    default:
902       return VA_STATUS_ERROR_UNIMPLEMENTED;
903    }
904 
905    return status;
906 }
907 
908 static VAStatus
909 handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
910 {
911    VAStatus status = VA_STATUS_SUCCESS;
912 
913    switch (u_reduce_video_profile(context->templat.profile)) {
914    case PIPE_VIDEO_FORMAT_MPEG4_AVC:
915       if (context->packed_header_type != VAEncPackedHeaderSequence)
916          return VA_STATUS_ERROR_UNIMPLEMENTED;
917 
918       status = vlVaHandleVAEncPackedHeaderDataBufferTypeH264(context, buf);
919       break;
920 
921    case PIPE_VIDEO_FORMAT_HEVC:
922       if (context->packed_header_type != VAEncPackedHeaderSequence)
923          return VA_STATUS_ERROR_UNIMPLEMENTED;
924 
925       status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
926       break;
927 
928 #if VA_CHECK_VERSION(1, 16, 0)
929    case PIPE_VIDEO_FORMAT_AV1:
930       status = vlVaHandleVAEncPackedHeaderDataBufferTypeAV1(context, buf);
931       break;
932 #endif
933 
934    default:
935       break;
936    }
937 
938    return status;
939 }
940 
941 static VAStatus
942 handleVAStatsStatisticsBufferType(VADriverContextP ctx, vlVaContext *context, vlVaBuffer *buf)
943 {
944    if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
945       return VA_STATUS_ERROR_UNIMPLEMENTED;
946 
947    vlVaDriver *drv;
948    drv = VL_VA_DRIVER(ctx);
949 
950    if (!drv)
951       return VA_STATUS_ERROR_INVALID_CONTEXT;
952 
953    if (!buf->derived_surface.resource)
954       buf->derived_surface.resource = pipe_buffer_create(drv->pipe->screen, PIPE_BIND_VERTEX_BUFFER,
955                                             PIPE_USAGE_STREAM, buf->size);
956 
957    context->target->statistics_data = buf->derived_surface.resource;
958 
959    return VA_STATUS_SUCCESS;
960 }
961 
962 VAStatus
963 vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
964 {
965    vlVaDriver *drv;
966    vlVaContext *context;
967    VAStatus vaStatus = VA_STATUS_SUCCESS;
968 
969    unsigned i;
970    unsigned slice_idx = 0;
971    vlVaBuffer *seq_param_buf = NULL;
972 
973    if (!ctx)
974       return VA_STATUS_ERROR_INVALID_CONTEXT;
975 
976    drv = VL_VA_DRIVER(ctx);
977    if (!drv)
978       return VA_STATUS_ERROR_INVALID_CONTEXT;
979 
980    mtx_lock(&drv->mutex);
981    context = handle_table_get(drv->htab, context_id);
982    if (!context) {
983       mtx_unlock(&drv->mutex);
984       return VA_STATUS_ERROR_INVALID_CONTEXT;
985    }
986 
987    /* Always process VAProtectedSliceDataBufferType first because it changes the state */
988    for (i = 0; i < num_buffers; ++i) {
989       vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
990       if (!buf) {
991          mtx_unlock(&drv->mutex);
992          return VA_STATUS_ERROR_INVALID_BUFFER;
993       }
994 
995       if (buf->type == VAProtectedSliceDataBufferType)
996          handleVAProtectedSliceDataBufferType(context, buf);
997       else if (buf->type == VAEncSequenceParameterBufferType)
998          seq_param_buf = buf;
999    }
1000 
1001    /* Now process VAEncSequenceParameterBufferType, where the encoder is created
1002     * and some default parameters are set, making sure it won't overwrite
1003     * parameters already set by the application through earlier buffers. */
1004    if (seq_param_buf)
1005       vaStatus = handleVAEncSequenceParameterBufferType(drv, context, seq_param_buf);
1006 
1007    for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
1008       vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
1009 
1010       switch (buf->type) {
1011       case VAPictureParameterBufferType:
1012          vaStatus = handlePictureParameterBuffer(drv, context, buf);
1013          break;
1014 
1015       case VAIQMatrixBufferType:
1016          handleIQMatrixBuffer(context, buf);
1017          break;
1018 
1019       case VASliceParameterBufferType:
1020       {
1021          /* Some apps like gstreamer send all the slices at once,
1022             while others send individual VASliceParameterBufferType buffers.
1023 
1024             slice_idx is the zero-based count of slices received before
1025             this call to handleSliceParameterBuffer.
1026          */
1027          handleSliceParameterBuffer(context, buf, slice_idx);
1028          slice_idx += buf->num_elements;
1029       } break;
1030 
1031       case VASliceDataBufferType:
1032          vaStatus = handleVASliceDataBufferType(context, buf);
1033          break;
1034 
1035       case VAProcPipelineParameterBufferType:
1036          vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
1037          break;
1038 
1039       case VAEncMiscParameterBufferType:
1040          vaStatus = handleVAEncMiscParameterBufferType(context, buf);
1041          break;
1042 
1043       case VAEncPictureParameterBufferType:
1044          vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
1045          break;
1046 
1047       case VAEncSliceParameterBufferType:
1048          vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
1049          break;
1050 
1051       case VAHuffmanTableBufferType:
1052          vlVaHandleHuffmanTableBufferType(context, buf);
1053          break;
1054 
1055       case VAEncPackedHeaderParameterBufferType:
1056          handleVAEncPackedHeaderParameterBufferType(context, buf);
1057          break;
1058       case VAEncPackedHeaderDataBufferType:
1059          handleVAEncPackedHeaderDataBufferType(context, buf);
1060          break;
1061 
1062       case VAStatsStatisticsBufferType:
1063          handleVAStatsStatisticsBufferType(ctx, context, buf);
1064          break;
1065 
1066       default:
1067          break;
1068       }
1069    }
1070    mtx_unlock(&drv->mutex);
1071 
1072    return vaStatus;
1073 }
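
/* Illustrative sketch, not driver code: the buffers handled above arrive from
 * a client through the standard libva entry points, e.g. for one decoded
 * frame (buffer IDs are hypothetical):
 *
 *    vaBeginPicture(dpy, context_id, render_target);
 *    VABufferID bufs[] = { pic_param_buf, iq_matrix_buf,
 *                          slice_param_buf, slice_data_buf };
 *    vaRenderPicture(dpy, context_id, bufs, 4);
 *    vaEndPicture(dpy, context_id);
 *
 * which map onto vlVaBeginPicture, vlVaRenderPicture and vlVaEndPicture in
 * this file.
 */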
1074 
1075 static bool vlVaQueryApplyFilmGrainAV1(vlVaContext *context,
1076                                  int *output_id,
1077                                  struct pipe_video_buffer ***out_target)
1078 {
1079    struct pipe_av1_picture_desc *av1 = NULL;
1080 
1081    if (u_reduce_video_profile(context->templat.profile) != PIPE_VIDEO_FORMAT_AV1 ||
1082        context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_BITSTREAM)
1083       return false;
1084 
1085    av1 = &context->desc.av1;
1086    if (!av1->picture_parameter.film_grain_info.film_grain_info_fields.apply_grain)
1087       return false;
1088 
1089    *output_id = av1->picture_parameter.current_frame_id;
1090    *out_target = &av1->film_grain_target;
1091    return true;
1092 }
1093 
1094 VAStatus
1095 vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
1096 {
1097    vlVaDriver *drv;
1098    vlVaContext *context;
1099    vlVaBuffer *coded_buf;
1100    vlVaSurface *surf;
1101    void *feedback = NULL;
1102    struct pipe_screen *screen;
1103    bool supported;
1104    bool realloc = false;
1105    bool apply_av1_fg = false;
1106    enum pipe_format format;
1107    struct pipe_video_buffer **out_target;
1108    int output_id;
1109 
1110    if (!ctx)
1111       return VA_STATUS_ERROR_INVALID_CONTEXT;
1112 
1113    drv = VL_VA_DRIVER(ctx);
1114    if (!drv)
1115       return VA_STATUS_ERROR_INVALID_CONTEXT;
1116 
1117    mtx_lock(&drv->mutex);
1118    context = handle_table_get(drv->htab, context_id);
1119    mtx_unlock(&drv->mutex);
1120    if (!context)
1121       return VA_STATUS_ERROR_INVALID_CONTEXT;
1122 
1123    if (!context->decoder) {
1124       if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
1125          return VA_STATUS_ERROR_INVALID_CONTEXT;
1126 
1127       /* VPP */
1128       return VA_STATUS_SUCCESS;
1129    }
1130 
1131    output_id = context->target_id;
1132    out_target = &context->target;
1133    apply_av1_fg = vlVaQueryApplyFilmGrainAV1(context, &output_id, &out_target);
1134 
1135    mtx_lock(&drv->mutex);
1136    surf = handle_table_get(drv->htab, output_id);
1137    if (!surf || !surf->buffer) {
1138       mtx_unlock(&drv->mutex);
1139       return VA_STATUS_ERROR_INVALID_SURFACE;
1140    }
1141 
1142    if (apply_av1_fg) {
1143       vlVaSetSurfaceContext(drv, surf, context);
1144       *out_target = surf->buffer;
1145    }
1146 
1147    context->mpeg4.frame_num++;
1148 
1149    screen = context->decoder->context->screen;
1150    supported = screen->get_video_param(screen, context->decoder->profile,
1151                                        context->decoder->entrypoint,
1152                                        surf->buffer->interlaced ?
1153                                        PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
1154                                        PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);
1155 
1156    if (!supported) {
1157       surf->templat.interlaced = screen->get_video_param(screen,
1158                                        context->decoder->profile,
1159                                        context->decoder->entrypoint,
1160                                        PIPE_VIDEO_CAP_PREFERS_INTERLACED);
1161       realloc = true;
1162    }
1163 
1164    format = screen->get_video_param(screen, context->decoder->profile,
1165                                     context->decoder->entrypoint,
1166                                     PIPE_VIDEO_CAP_PREFERED_FORMAT);
1167 
1168    if (surf->buffer->buffer_format != format &&
1169        surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
1170       /* only check surfaces originally allocated as NV12 */
1171       surf->templat.buffer_format = format;
1172       realloc = true;
1173    }
1174 
1175    if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG) {
1176       if (surf->buffer->buffer_format == PIPE_FORMAT_NV12 &&
1177           context->mjpeg.sampling_factor != MJPEG_SAMPLING_FACTOR_NV12) {
1178          /* workaround to reallocate the surface buffer with the right format
1179           * if it doesn't match the sampling_factor. ffmpeg doesn't
1180           * use VASurfaceAttribPixelFormat and defaults to NV12.
1181           */
1182          switch (context->mjpeg.sampling_factor) {
1183             case MJPEG_SAMPLING_FACTOR_YUV422:
1184             case MJPEG_SAMPLING_FACTOR_YUY2:
1185                surf->templat.buffer_format = PIPE_FORMAT_YUYV;
1186                break;
1187             case MJPEG_SAMPLING_FACTOR_YUV444:
1188                surf->templat.buffer_format = PIPE_FORMAT_Y8_U8_V8_444_UNORM;
1189                break;
1190             case MJPEG_SAMPLING_FACTOR_YUV400:
1191                surf->templat.buffer_format = PIPE_FORMAT_Y8_400_UNORM;
1192                break;
1193             default:
1194                mtx_unlock(&drv->mutex);
1195                return VA_STATUS_ERROR_INVALID_SURFACE;
1196          }
1197          realloc = true;
1198       }
1199       /* check if the format is supported before proceeding with the realloc,
1200        * and also avoid submission if the hardware doesn't support the format
1201        * and the application failed to check the supported rt_formats.
1202        */
1203       if (!screen->is_video_format_supported(screen, surf->templat.buffer_format,
1204           PIPE_VIDEO_PROFILE_JPEG_BASELINE, PIPE_VIDEO_ENTRYPOINT_BITSTREAM)) {
1205          mtx_unlock(&drv->mutex);
1206          return VA_STATUS_ERROR_INVALID_SURFACE;
1207       }
1208    }
1209 
1210    if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
1211       if (context->desc.base.protected_playback) {
1212          surf->templat.bind |= PIPE_BIND_PROTECTED;
1213       }
1214       else
1215          surf->templat.bind &= ~PIPE_BIND_PROTECTED;
1216       realloc = true;
1217    }
1218 
1219    if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1 &&
1220        surf->buffer->buffer_format == PIPE_FORMAT_NV12 &&
1221        context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
1222       if (context->desc.av1.picture_parameter.bit_depth_idx == 1) {
1223          surf->templat.buffer_format = PIPE_FORMAT_P010;
1224          realloc = true;
1225       }
1226    }
1227 
1228    if (realloc) {
1229       struct pipe_video_buffer *old_buf = surf->buffer;
1230 
1231       if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat, NULL, 0) != VA_STATUS_SUCCESS) {
1232          mtx_unlock(&drv->mutex);
1233          return VA_STATUS_ERROR_ALLOCATION_FAILED;
1234       }
1235 
1236       if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
1237          if (old_buf->interlaced) {
1238             struct u_rect src_rect, dst_rect;
1239 
1240             dst_rect.x0 = src_rect.x0 = 0;
1241             dst_rect.y0 = src_rect.y0 = 0;
1242             dst_rect.x1 = src_rect.x1 = surf->templat.width;
1243             dst_rect.y1 = src_rect.y1 = surf->templat.height;
1244             vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
1245                                          old_buf, surf->buffer,
1246                                          &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
1247          } else {
1248             /* Can't convert from progressive to interlaced yet */
1249             mtx_unlock(&drv->mutex);
1250             return VA_STATUS_ERROR_INVALID_SURFACE;
1251          }
1252       }
1253 
1254       old_buf->destroy(old_buf);
1255       *out_target = surf->buffer;
1256    }
1257 
1258    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
1259       context->desc.base.fence = &surf->fence;
1260       struct pipe_screen *screen = context->decoder->context->screen;
1261       coded_buf = context->coded_buf;
1262       if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1263          context->desc.h264enc.frame_num_cnt++;
1264 
1265       /* keep the other path unchanged */
1266       if (!screen->get_video_param(screen, context->templat.profile,
1267                                   context->decoder->entrypoint,
1268                                   PIPE_VIDEO_CAP_ENC_QUALITY_LEVEL)) {
1269 
1270          if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1271             getEncParamPresetH264(context);
1272          else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
1273             getEncParamPresetH265(context);
1274       }
1275 
1276       context->desc.base.input_format = surf->buffer->buffer_format;
1277       context->desc.base.input_full_range = surf->full_range;
1278       context->desc.base.output_format = surf->encoder_format;
1279 
1280       int driver_metadata_support = drv->pipe->screen->get_video_param(drv->pipe->screen,
1281                                                                        context->decoder->profile,
1282                                                                        context->decoder->entrypoint,
1283                                                                        PIPE_VIDEO_CAP_ENC_SUPPORTS_FEEDBACK_METADATA);
1284       if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1285          context->desc.h264enc.requested_metadata = driver_metadata_support;
1286       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
1287          context->desc.h265enc.requested_metadata = driver_metadata_support;
1288       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
1289          context->desc.av1enc.requested_metadata = driver_metadata_support;
1290 
1291       context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
1292       context->decoder->encode_bitstream(context->decoder, context->target,
1293                                          coded_buf->derived_surface.resource, &feedback);
1294       coded_buf->feedback = feedback;
1295       coded_buf->ctx = context_id;
1296       surf->feedback = feedback;
1297       surf->coded_buf = coded_buf;
1298       coded_buf->associated_encode_input_surf = context->target_id;
1299    } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_BITSTREAM) {
1300       context->desc.base.fence = &surf->fence;
1301    } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING) {
1302       context->desc.base.fence = &surf->fence;
1303    }
1304 
1305    context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
1306 
1307    if (drv->pipe->screen->get_video_param(drv->pipe->screen,
1308                            context->decoder->profile,
1309                            context->decoder->entrypoint,
1310                            PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME))
1311       context->decoder->flush(context->decoder);
1312    else {
1313       if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
1314          u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
1315          int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
1316          int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
1317          surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
1318          surf->force_flushed = false;
1319          if (context->first_single_submitted) {
1320             context->decoder->flush(context->decoder);
1321             context->first_single_submitted = false;
1322             surf->force_flushed = true;
1323          }
1324          if (p_remain_in_idr == 1) {
1325             if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
1326                context->decoder->flush(context->decoder);
1327                context->first_single_submitted = true;
1328             }
1329             else
1330                context->first_single_submitted = false;
1331             surf->force_flushed = true;
1332          }
1333       }
1334    }
1335 
1336    if (context->decoder->get_feedback_fence &&
1337        !context->decoder->get_feedback_fence(context->decoder, feedback)) {
1338          mtx_unlock(&drv->mutex);
1339          return VA_STATUS_ERROR_OPERATION_FAILED;
1340    }
1341 
1342    /* Update frame_num disregarding PIPE_VIDEO_CAP_REQUIRES_FLUSH_ON_END_FRAME check above */
1343    if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
1344       if ((u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC)
1345          && (!context->desc.h264enc.not_referenced))
1346          context->desc.h264enc.frame_num++;
1347       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
1348          context->desc.h265enc.frame_num++;
1349       else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1)
1350          context->desc.av1enc.frame_num++;
1351    }
1352 
1353    mtx_unlock(&drv->mutex);
1354    return VA_STATUS_SUCCESS;
1355 }
1356