/**************************************************************************
 *
 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_video_codec.h"

#include "util/u_handle_table.h"
#include "util/u_video.h"
#include "util/u_memory.h"

#include "vl/vl_vlc.h"
#include "vl/vl_winsys.h"

#include "va_private.h"

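/*
 * vlVaBeginPicture - VA-API vaBeginPicture entry point.
 *
 * Looks up the context and the render target surface, binds the surface as
 * the decode/encode target and resets per-frame state (MPEG-1/2 quant
 * matrices, MJPEG sampling factor). For video processing contexts (profile
 * PIPE_VIDEO_PROFILE_UNKNOWN) only the target buffer format is validated.
 */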
VAStatus
vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaSurface *surf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
      context->desc.mpeg12.intra_matrix = NULL;
      context->desc.mpeg12.non_intra_matrix = NULL;
   }

   surf = handle_table_get(drv->htab, render_target);
   mtx_unlock(&drv->mutex);
   if (!surf || !surf->buffer)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   context->target_id = render_target;
   surf->ctx = context_id;
   context->target = surf->buffer;
   context->mjpeg.sampling_factor = 0;

   if (!context->decoder) {

      /* VPP */
      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_NV12 &&
          context->target->buffer_format != PIPE_FORMAT_P010 &&
          context->target->buffer_format != PIPE_FORMAT_P016)
         return VA_STATUS_ERROR_UNIMPLEMENTED;

      return VA_STATUS_SUCCESS;
   }

   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->needs_begin_frame = true;

   return VA_STATUS_SUCCESS;
}

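/* Resolve a VASurfaceID to its pipe_video_buffer, or NULL if the handle is
 * unknown; used by the per-codec picture parameter handlers to translate
 * reference picture lists. */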
void
vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
                      struct pipe_video_buffer **ref_frame)
{
   vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
   if (surf)
      *ref_frame = surf->buffer;
   else
      *ref_frame = NULL;
}

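/* Dispatch VAPictureParameterBufferType to the per-codec handler. On the
 * first picture this also creates the pipe video decoder, since
 * max_references is only known once picture parameters arrive; for VP9 the
 * decoder dimensions are refreshed from the frame header on every picture. */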
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   enum pipe_video_format format =
      u_reduce_video_profile(context->templat.profile);

   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandlePictureParameterBufferVP9(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandlePictureParameterBufferAV1(drv, context, buf);
      break;

   default:
      break;
   }

   /* Create the decoder once max_references is known. */
   if (!context->decoder) {
      if (!context->target)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->templat.level = u_get_h264_level(context->templat.width,
            context->templat.height, &context->templat.max_references);

      context->decoder = drv->pipe->create_video_codec(drv->pipe,
         &context->templat);

      if (!context->decoder)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;

      context->needs_begin_frame = true;
   }

   if (format == PIPE_VIDEO_FORMAT_VP9) {
      context->decoder->width =
         context->desc.vp9.picture_parameter.frame_width;
      context->decoder->height =
         context->desc.vp9.picture_parameter.frame_height;
   }

   return vaStatus;
}

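/* Copy VAIQMatrixBufferType contents into the per-codec picture description;
 * codecs that carry no inverse quantization matrices are ignored. */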
static void
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleIQMatrixBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleIQMatrixBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleIQMatrixBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleIQMatrixBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleIQMatrixBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

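/* Dispatch VASliceParameterBufferType to the per-codec handler; 'num' is the
 * index of the buffer within the vaRenderPicture call and is only used by
 * the AV1 handler. */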
static void
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf, unsigned num)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleSliceParameterBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandleSliceParameterBufferVC1(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleSliceParameterBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleSliceParameterBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleSliceParameterBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleSliceParameterBufferMJPEG(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandleSliceParameterBufferVP9(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandleSliceParameterBufferAV1(context, buf, num);
      break;

   default:
      break;
   }
}

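/* Return 1 if 'code' ('bits' wide) is found at one of the first 64 byte
 * positions of the buffer, 0 otherwise. Used to decide whether a start code
 * has to be prepended to the application-supplied slice data. */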
static unsigned int
bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
{
   struct vl_vlc vlc = {0};
   int i;

   /* search the first 64 bytes for a startcode */
   vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
   for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
      if (vl_vlc_peekbits(&vlc, bits) == code)
         return 1;
      vl_vlc_eatbits(&vlc, 8);
      vl_vlc_fillbits(&vlc);
   }

   return 0;
}

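/* Store a copy of the decryption key carried in a
 * VAProtectedSliceDataBufferType buffer and flag the frame as protected
 * playback content. */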
static void
handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   uint8_t* encrypted_data = (uint8_t*) buf->data;

   unsigned int drm_key_size = buf->size;

   context->desc.base.decrypt_key = CALLOC(1, drm_key_size);
   memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
   context->desc.base.protected_playback = true;
}

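/* Submit slice data to the decoder. Depending on the codec, a missing start
 * code (H.264/HEVC/VC-1/MPEG-4), a generated MJPEG slice header or the MJPEG
 * EOI marker is passed as an extra buffer around the application data so
 * that decode_bitstream() receives a complete bitstream fragment; for VP9
 * the uncompressed frame header is parsed here. */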
static void
handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
   unsigned num_buffers = 0;
   void * const *buffers[3];
   unsigned sizes[3];
   static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
   static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };

   if (!context->desc.base.protected_playback) {
      switch (format) {
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         buffers[num_buffers] = (void *const)&start_code_h264;
         sizes[num_buffers++] = sizeof(start_code_h264);
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         buffers[num_buffers] = (void *const)&start_code_h265;
         sizes[num_buffers++] = sizeof(start_code_h265);
         break;
      case PIPE_VIDEO_FORMAT_VC1:
         if (bufHasStartcode(buf, 0x0000010d, 32) ||
             bufHasStartcode(buf, 0x0000010c, 32) ||
             bufHasStartcode(buf, 0x0000010b, 32))
            break;

         if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
            buffers[num_buffers] = (void *const)&start_code_vc1;
            sizes[num_buffers++] = sizeof(start_code_vc1);
         }
         break;
      case PIPE_VIDEO_FORMAT_MPEG4:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         vlVaDecoderFixMPEG4Startcode(context);
         buffers[num_buffers] = (void *)context->mpeg4.start_code;
         sizes[num_buffers++] = context->mpeg4.start_code_size;
         break;
      case PIPE_VIDEO_FORMAT_JPEG:
         vlVaGetJpegSliceHeader(context);
         buffers[num_buffers] = (void *)context->mjpeg.slice_header;
         sizes[num_buffers++] = context->mjpeg.slice_header_size;
         break;
      case PIPE_VIDEO_FORMAT_VP9:
         vlVaDecoderVP9BitstreamHeader(context, buf);
         break;
      case PIPE_VIDEO_FORMAT_AV1:
         break;
      default:
         break;
      }
   }

   buffers[num_buffers] = buf->data;
   sizes[num_buffers] = buf->size;
   ++num_buffers;

   if (format == PIPE_VIDEO_FORMAT_JPEG) {
      buffers[num_buffers] = (void *const)&eoi_jpeg;
      sizes[num_buffers++] = sizeof(eoi_jpeg);
   }

   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
                                      num_buffers, (const void * const*)buffers, sizes);
}

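/* The handleVAEnc*() helpers below dispatch the encoder parameter buffers
 * (rate control, frame rate, temporal layers, sequence/picture/slice
 * parameters and packed headers) to the H.264 or HEVC specific
 * implementation; unsupported combinations fall through as no-ops or return
 * VA_STATUS_ERROR_UNIMPLEMENTED. */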
static VAStatus
handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   VAEncMiscParameterBuffer *misc;
   misc = buf->data;

   switch (misc->type) {
   case VAEncMiscParameterTypeRateControl:
      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
      break;

   case VAEncMiscParameterTypeFrameRate:
      vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
      break;

   case VAEncMiscParameterTypeTemporalLayerStructure:
      vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
      break;

   default:
      break;
   }

   return vaStatus;
}

static VAStatus
handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_HEVC:
      break;

   default:
      return VA_STATUS_ERROR_UNIMPLEMENTED;
   }

   VAEncPackedHeaderParameterBuffer *param = (VAEncPackedHeaderParameterBuffer *)buf->data;
   if (param->type == VAEncPackedHeaderSequence)
      context->packed_header_type = param->type;
   else
      status = VA_STATUS_ERROR_UNIMPLEMENTED;

   return status;
}

static VAStatus
handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   if (context->packed_header_type != VAEncPackedHeaderSequence)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
      break;

   default:
      break;
   }

   return status;
}

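/*
 * vlVaRenderPicture - VA-API vaRenderPicture entry point.
 *
 * Walks the buffer list twice: a first pass handles
 * VAProtectedSliceDataBufferType, because it toggles protected playback
 * state that the other handlers depend on, then a second pass dispatches
 * every buffer to the matching decode, encode or video-processing handler,
 * stopping at the first error.
 */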
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
   vlVaDriver *drv;
   vlVaContext *context;
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   unsigned i;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   /* Always process VAProtectedSliceDataBufferType first because it changes the state */
   for (i = 0; i < num_buffers; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      if (buf->type == VAProtectedSliceDataBufferType)
         handleVAProtectedSliceDataBufferType(context, buf);
   }

   for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);

      switch (buf->type) {
      case VAPictureParameterBufferType:
         vaStatus = handlePictureParameterBuffer(drv, context, buf);
         break;

      case VAIQMatrixBufferType:
         handleIQMatrixBuffer(context, buf);
         break;

      case VASliceParameterBufferType:
         handleSliceParameterBuffer(context, buf, i);
         break;

      case VASliceDataBufferType:
         handleVASliceDataBufferType(context, buf);
         break;

      case VAProcPipelineParameterBufferType:
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
         break;

      case VAEncSequenceParameterBufferType:
         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
         break;

      case VAEncMiscParameterBufferType:
         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
         break;

      case VAEncPictureParameterBufferType:
         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
         break;

      case VAEncSliceParameterBufferType:
         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
         break;

      case VAHuffmanTableBufferType:
         vlVaHandleHuffmanTableBufferType(context, buf);
         break;

      case VAEncPackedHeaderParameterBufferType:
         handleVAEncPackedHeaderParameterBufferType(context, buf);
         break;
      case VAEncPackedHeaderDataBufferType:
         handleVAEncPackedHeaderDataBufferType(context, buf);
         break;

      default:
         break;
      }
   }
   mtx_unlock(&drv->mutex);

   return vaStatus;
}

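/*
 * vlVaEndPicture - VA-API vaEndPicture entry point.
 *
 * Finishes the frame started by vlVaBeginPicture. If the render target turns
 * out to be unsuitable for the codec (interlacing support, preferred pixel
 * format, MJPEG chroma sampling, protected playback binding, 10-bit AV1),
 * the surface is reallocated from an updated template first. Encode contexts
 * additionally run begin_frame()/encode_bitstream() here and attach the
 * feedback handle and coded buffer to the surface before end_frame().
 */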
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback;
   struct pipe_screen *screen;
   bool supported;
   bool realloc = false;
   enum pipe_format format;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   mtx_unlock(&drv->mutex);
   if (!context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      /* VPP */
      return VA_STATUS_SUCCESS;
   }

   mtx_lock(&drv->mutex);
   surf = handle_table_get(drv->htab, context->target_id);
   context->mpeg4.frame_num++;

   screen = context->decoder->context->screen;
   supported = screen->get_video_param(screen, context->decoder->profile,
                                       context->decoder->entrypoint,
                                       surf->buffer->interlaced ?
                                       PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
                                       PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);

   if (!supported) {
      surf->templat.interlaced = screen->get_video_param(screen,
                                       context->decoder->profile,
                                       context->decoder->entrypoint,
                                       PIPE_VIDEO_CAP_PREFERS_INTERLACED);
      realloc = true;
   }

   format = screen->get_video_param(screen, context->decoder->profile,
                                    context->decoder->entrypoint,
                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);

   if (surf->buffer->buffer_format != format &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      /* only switch to the preferred format if the surface was originally allocated as NV12 */
      surf->templat.buffer_format = format;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->mjpeg.sampling_factor == 0x211111 ||
          context->mjpeg.sampling_factor == 0x221212) {
         surf->templat.buffer_format = PIPE_FORMAT_YUYV;
         realloc = true;
      } else if (context->mjpeg.sampling_factor != 0x221111) {
         /* Not NV12 either */
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
   }

   if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
      if (context->desc.base.protected_playback)
         surf->templat.bind |= PIPE_BIND_PROTECTED;
      else
         surf->templat.bind &= ~PIPE_BIND_PROTECTED;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1 &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->desc.av1.picture_parameter.bit_depth_idx == 1) {
         surf->templat.buffer_format = PIPE_FORMAT_P010;
         realloc = true;
      }
   }

   if (realloc) {
      struct pipe_video_buffer *old_buf = surf->buffer;

      if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat, NULL, 0) != VA_STATUS_SUCCESS) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }

      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
         if (old_buf->interlaced) {
            struct u_rect src_rect, dst_rect;

            dst_rect.x0 = src_rect.x0 = 0;
            dst_rect.y0 = src_rect.y0 = 0;
            dst_rect.x1 = src_rect.x1 = surf->templat.width;
            dst_rect.y1 = src_rect.y1 = surf->templat.height;
            vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
                                         old_buf, surf->buffer,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
         } else {
            /* Can't convert from progressive to interlaced yet */
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
      }

      old_buf->destroy(old_buf);
      context->target = surf->buffer;
   }

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
         getEncParamPresetH264(context);
         context->desc.h264enc.frame_num_cnt++;
      } else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         getEncParamPresetH265(context);
      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      surf->feedback = feedback;
      surf->coded_buf = coded_buf;
   }

   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
       u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
      int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
      int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
      surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
      surf->force_flushed = false;
      if (context->first_single_submitted) {
         context->decoder->flush(context->decoder);
         context->first_single_submitted = false;
         surf->force_flushed = true;
      }
      if (p_remain_in_idr == 1) {
         if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
            context->decoder->flush(context->decoder);
            context->first_single_submitted = true;
         } else
            context->first_single_submitted = false;
         surf->force_flushed = true;
      }
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
              u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
      context->desc.h265enc.frame_num++;
   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}