/**************************************************************************
 *
 * Copyright 2010 Thomas Balling Sørensen & Orasanu Lucian.
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pipe/p_video_codec.h"

#include "util/u_handle_table.h"
#include "util/u_video.h"
#include "util/u_memory.h"

#include "util/vl_vlc.h"
#include "vl/vl_winsys.h"

#include "va_private.h"

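/*
 * vaBeginPicture entry point: bind the render target surface to the context
 * and prepare the decoder/encoder/processing pipeline for a new frame.
 */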
VAStatus
vlVaBeginPicture(VADriverContextP ctx, VAContextID context_id, VASurfaceID render_target)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaSurface *surf;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG12) {
      context->desc.mpeg12.intra_matrix = NULL;
      context->desc.mpeg12.non_intra_matrix = NULL;
   }

   surf = handle_table_get(drv->htab, render_target);
   mtx_unlock(&drv->mutex);
   if (!surf || !surf->buffer)
      return VA_STATUS_ERROR_INVALID_SURFACE;

   context->target_id = render_target;
   surf->ctx = context_id;
   context->target = surf->buffer;
   context->mjpeg.sampling_factor = 0;

   if (!context->decoder) {

      /* VPP */
      if (context->templat.profile == PIPE_VIDEO_PROFILE_UNKNOWN &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8A8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_B8G8R8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_R8G8B8X8_UNORM &&
          context->target->buffer_format != PIPE_FORMAT_NV12 &&
          context->target->buffer_format != PIPE_FORMAT_P010 &&
          context->target->buffer_format != PIPE_FORMAT_P016)
         return VA_STATUS_ERROR_UNIMPLEMENTED;

      if (drv->pipe->screen->get_video_param(drv->pipe->screen,
                                             PIPE_VIDEO_PROFILE_UNKNOWN,
                                             PIPE_VIDEO_ENTRYPOINT_PROCESSING,
                                             PIPE_VIDEO_CAP_SUPPORTED)) {
         context->needs_begin_frame = true;
         context->vpp_needs_flush_on_endpic = true;
      }

      return VA_STATUS_SUCCESS;
   }

   if (context->decoder->entrypoint != PIPE_VIDEO_ENTRYPOINT_ENCODE)
      context->needs_begin_frame = true;

   return VA_STATUS_SUCCESS;
}

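/*
 * Look up the pipe_video_buffer backing a VASurfaceID so it can be used as a
 * reference frame; *ref_frame is set to NULL if the surface is unknown.
 */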
void
vlVaGetReferenceFrame(vlVaDriver *drv, VASurfaceID surface_id,
                      struct pipe_video_buffer **ref_frame)
{
   vlVaSurface *surf = handle_table_get(drv->htab, surface_id);
   if (surf)
      *ref_frame = surf->buffer;
   else
      *ref_frame = NULL;
}

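/*
 * Dispatch a VAPictureParameterBufferType buffer to the codec-specific
 * handler, create the pipe video decoder on first use and update the VP9
 * frame dimensions.
 */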
static VAStatus
handlePictureParameterBuffer(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   enum pipe_video_format format =
      u_reduce_video_profile(context->templat.profile);

   switch (format) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandlePictureParameterBufferMPEG12(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandlePictureParameterBufferH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandlePictureParameterBufferVC1(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandlePictureParameterBufferMPEG4(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandlePictureParameterBufferHEVC(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandlePictureParameterBufferMJPEG(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandlePictureParameterBufferVP9(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandlePictureParameterBufferAV1(drv, context, buf);
      break;

   default:
      break;
   }

   /* Create the decoder once max_references is known. */
   if (!context->decoder) {
      if (!context->target)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      if (format == PIPE_VIDEO_FORMAT_MPEG4_AVC)
         context->templat.level = u_get_h264_level(context->templat.width,
            context->templat.height, &context->templat.max_references);

      context->decoder = drv->pipe->create_video_codec(drv->pipe,
         &context->templat);

      if (!context->decoder)
         return VA_STATUS_ERROR_ALLOCATION_FAILED;

      context->needs_begin_frame = true;
   }

   if (format == PIPE_VIDEO_FORMAT_VP9) {
      context->decoder->width =
         context->desc.vp9.picture_parameter.frame_width;
      context->decoder->height =
         context->desc.vp9.picture_parameter.frame_height;
   }

   return vaStatus;
}

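/*
 * Dispatch a VAIQMatrixBufferType buffer to the codec-specific handler.
 */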
static void
handleIQMatrixBuffer(vlVaContext *context, vlVaBuffer *buf)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleIQMatrixBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleIQMatrixBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleIQMatrixBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleIQMatrixBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleIQMatrixBufferMJPEG(context, buf);
      break;

   default:
      break;
   }
}

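/*
 * Dispatch a VASliceParameterBufferType buffer; AV1 additionally receives
 * the running index of slice parameter buffers in this vaRenderPicture call.
 */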
static void
handleSliceParameterBuffer(vlVaContext *context, vlVaBuffer *buf, unsigned num)
{
   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG12:
      vlVaHandleSliceParameterBufferMPEG12(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VC1:
      vlVaHandleSliceParameterBufferVC1(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      vlVaHandleSliceParameterBufferH264(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_MPEG4:
      vlVaHandleSliceParameterBufferMPEG4(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      vlVaHandleSliceParameterBufferHEVC(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_JPEG:
      vlVaHandleSliceParameterBufferMJPEG(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_VP9:
      vlVaHandleSliceParameterBufferVP9(context, buf);
      break;

   case PIPE_VIDEO_FORMAT_AV1:
      vlVaHandleSliceParameterBufferAV1(context, buf, num);
      break;

   default:
      break;
   }
}

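/*
 * Return 1 if the first 64 bytes of the buffer already contain the given
 * start code (matched against the top 'bits' bits), 0 otherwise.
 */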
static unsigned int
bufHasStartcode(vlVaBuffer *buf, unsigned int code, unsigned int bits)
{
   struct vl_vlc vlc = {0};
   int i;

   /* search the first 64 bytes for a startcode */
   vl_vlc_init(&vlc, 1, (const void * const*)&buf->data, &buf->size);
   for (i = 0; i < 64 && vl_vlc_bits_left(&vlc) >= bits; ++i) {
      if (vl_vlc_peekbits(&vlc, bits) == code)
         return 1;
      vl_vlc_eatbits(&vlc, 8);
      vl_vlc_fillbits(&vlc);
   }

   return 0;
}

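/*
 * Copy the decryption key blob from a VAProtectedSliceDataBufferType buffer
 * into the picture description and mark the picture as protected playback.
 */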
static void
handleVAProtectedSliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   uint8_t *encrypted_data = (uint8_t *)buf->data;

   unsigned int drm_key_size = buf->size;

   context->desc.base.decrypt_key = CALLOC(1, drm_key_size);
   memcpy(context->desc.base.decrypt_key, encrypted_data, drm_key_size);
   context->desc.base.protected_playback = true;
}

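/*
 * Submit slice data to the decoder. Codecs that need it get a start code
 * (or a JPEG slice header/EOI marker) prepended/appended as additional
 * bitstream buffers, then begin_frame() is issued if still pending and the
 * whole set is handed to decode_bitstream().
 */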
static VAStatus
handleVASliceDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   enum pipe_video_format format = u_reduce_video_profile(context->templat.profile);
   unsigned num_buffers = 0;
   void * const *buffers[3];
   unsigned sizes[3];
   static const uint8_t start_code_h264[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_h265[] = { 0x00, 0x00, 0x01 };
   static const uint8_t start_code_vc1[] = { 0x00, 0x00, 0x01, 0x0d };
   static const uint8_t eoi_jpeg[] = { 0xff, 0xd9 };

   if (!context->decoder)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->desc.base.protected_playback) {
      switch (format) {
      case PIPE_VIDEO_FORMAT_MPEG4_AVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         buffers[num_buffers] = (void *const)&start_code_h264;
         sizes[num_buffers++] = sizeof(start_code_h264);
         break;
      case PIPE_VIDEO_FORMAT_HEVC:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         buffers[num_buffers] = (void *const)&start_code_h265;
         sizes[num_buffers++] = sizeof(start_code_h265);
         break;
      case PIPE_VIDEO_FORMAT_VC1:
         if (bufHasStartcode(buf, 0x0000010d, 32) ||
             bufHasStartcode(buf, 0x0000010c, 32) ||
             bufHasStartcode(buf, 0x0000010b, 32))
            break;

         if (context->decoder->profile == PIPE_VIDEO_PROFILE_VC1_ADVANCED) {
            buffers[num_buffers] = (void *const)&start_code_vc1;
            sizes[num_buffers++] = sizeof(start_code_vc1);
         }
         break;
      case PIPE_VIDEO_FORMAT_MPEG4:
         if (bufHasStartcode(buf, 0x000001, 24))
            break;

         vlVaDecoderFixMPEG4Startcode(context);
         buffers[num_buffers] = (void *)context->mpeg4.start_code;
         sizes[num_buffers++] = context->mpeg4.start_code_size;
         break;
      case PIPE_VIDEO_FORMAT_JPEG:
         vlVaGetJpegSliceHeader(context);
         buffers[num_buffers] = (void *)context->mjpeg.slice_header;
         sizes[num_buffers++] = context->mjpeg.slice_header_size;
         break;
      case PIPE_VIDEO_FORMAT_VP9:
         vlVaDecoderVP9BitstreamHeader(context, buf);
         break;
      case PIPE_VIDEO_FORMAT_AV1:
         break;
      default:
         break;
      }
   }

   buffers[num_buffers] = buf->data;
   sizes[num_buffers] = buf->size;
   ++num_buffers;

   if (format == PIPE_VIDEO_FORMAT_JPEG) {
      buffers[num_buffers] = (void *const)&eoi_jpeg;
      sizes[num_buffers++] = sizeof(eoi_jpeg);
   }

   if (context->needs_begin_frame) {
      context->decoder->begin_frame(context->decoder, context->target,
                                    &context->desc.base);
      context->needs_begin_frame = false;
   }
   context->decoder->decode_bitstream(context->decoder, context->target, &context->desc.base,
                                      num_buffers, (const void * const*)buffers, sizes);
   return VA_STATUS_SUCCESS;
}

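/*
 * Encoder misc-parameter handlers: forward rate control, frame rate and
 * temporal-layer settings to the codec-specific encoder implementations.
 */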
static VAStatus
handleVAEncMiscParameterTypeRateControl(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeRateControlHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeFrameRate(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncMiscParameterTypeFrameRateHEVC(context, misc);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterTypeTemporalLayer(vlVaContext *context, VAEncMiscParameterBuffer *misc)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncMiscParameterTypeTemporalLayerH264(context, misc);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      break;

   default:
      break;
   }

   return status;
}

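/*
 * Encoder parameter buffer handlers: sequence, picture and slice parameters
 * plus the misc-parameter wrapper, dispatched to the H.264 and HEVC encode
 * paths; other codecs are silently ignored here.
 */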
static VAStatus
handleVAEncSequenceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSequenceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncMiscParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus vaStatus = VA_STATUS_SUCCESS;
   VAEncMiscParameterBuffer *misc;
   misc = buf->data;

   switch (misc->type) {
   case VAEncMiscParameterTypeRateControl:
      vaStatus = handleVAEncMiscParameterTypeRateControl(context, misc);
      break;

   case VAEncMiscParameterTypeFrameRate:
      vaStatus = handleVAEncMiscParameterTypeFrameRate(context, misc);
      break;

   case VAEncMiscParameterTypeTemporalLayerStructure:
      vaStatus = handleVAEncMiscParameterTypeTemporalLayer(context, misc);
      break;

   default:
      break;
   }

   return vaStatus;
}

static VAStatus
handleVAEncPictureParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPictureParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

static VAStatus
handleVAEncSliceParameterBufferType(vlVaDriver *drv, vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_MPEG4_AVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeH264(drv, context, buf);
      break;

   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncSliceParameterBufferTypeHEVC(drv, context, buf);
      break;

   default:
      break;
   }

   return status;
}

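/*
 * Packed header handling: only packed sequence headers on HEVC are accepted;
 * anything else is rejected as unimplemented.
 */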
static VAStatus
handleVAEncPackedHeaderParameterBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_HEVC:
      break;

   default:
      return VA_STATUS_ERROR_UNIMPLEMENTED;
   }

   VAEncPackedHeaderParameterBuffer *param = (VAEncPackedHeaderParameterBuffer *)buf->data;
   if (param->type == VAEncPackedHeaderSequence)
      context->packed_header_type = param->type;
   else
      status = VA_STATUS_ERROR_UNIMPLEMENTED;

   return status;
}

static VAStatus
handleVAEncPackedHeaderDataBufferType(vlVaContext *context, vlVaBuffer *buf)
{
   VAStatus status = VA_STATUS_SUCCESS;

   if (context->packed_header_type != VAEncPackedHeaderSequence)
      return VA_STATUS_ERROR_UNIMPLEMENTED;

   switch (u_reduce_video_profile(context->templat.profile)) {
   case PIPE_VIDEO_FORMAT_HEVC:
      status = vlVaHandleVAEncPackedHeaderDataBufferTypeHEVC(context, buf);
      break;

   default:
      break;
   }

   return status;
}

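/*
 * vaRenderPicture entry point: walk the buffer list, handling protected
 * slice data first (it changes the decode state), then dispatch each buffer
 * type to its handler.
 */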
VAStatus
vlVaRenderPicture(VADriverContextP ctx, VAContextID context_id, VABufferID *buffers, int num_buffers)
{
   vlVaDriver *drv;
   vlVaContext *context;
   VAStatus vaStatus = VA_STATUS_SUCCESS;

   unsigned i;
   unsigned slice_param_idx = 0;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   if (!context) {
      mtx_unlock(&drv->mutex);
      return VA_STATUS_ERROR_INVALID_CONTEXT;
   }

   /* Always process VAProtectedSliceDataBufferType first because it changes the state */
   for (i = 0; i < num_buffers; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);
      if (!buf) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_BUFFER;
      }

      if (buf->type == VAProtectedSliceDataBufferType)
         handleVAProtectedSliceDataBufferType(context, buf);
   }

   for (i = 0; i < num_buffers && vaStatus == VA_STATUS_SUCCESS; ++i) {
      vlVaBuffer *buf = handle_table_get(drv->htab, buffers[i]);

      switch (buf->type) {
      case VAPictureParameterBufferType:
         vaStatus = handlePictureParameterBuffer(drv, context, buf);
         break;

      case VAIQMatrixBufferType:
         handleIQMatrixBuffer(context, buf);
         break;

      case VASliceParameterBufferType:
         handleSliceParameterBuffer(context, buf, slice_param_idx++);
         break;

      case VASliceDataBufferType:
         vaStatus = handleVASliceDataBufferType(context, buf);
         break;

      case VAProcPipelineParameterBufferType:
         vaStatus = vlVaHandleVAProcPipelineParameterBufferType(drv, context, buf);
         break;

      case VAEncSequenceParameterBufferType:
         vaStatus = handleVAEncSequenceParameterBufferType(drv, context, buf);
         break;

      case VAEncMiscParameterBufferType:
         vaStatus = handleVAEncMiscParameterBufferType(context, buf);
         break;

      case VAEncPictureParameterBufferType:
         vaStatus = handleVAEncPictureParameterBufferType(drv, context, buf);
         break;

      case VAEncSliceParameterBufferType:
         vaStatus = handleVAEncSliceParameterBufferType(drv, context, buf);
         break;

      case VAHuffmanTableBufferType:
         vlVaHandleHuffmanTableBufferType(context, buf);
         break;

      case VAEncPackedHeaderParameterBufferType:
         handleVAEncPackedHeaderParameterBufferType(context, buf);
         break;
      case VAEncPackedHeaderDataBufferType:
         handleVAEncPackedHeaderDataBufferType(context, buf);
         break;

      default:
         break;
      }
   }
   mtx_unlock(&drv->mutex);

   return vaStatus;
}

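/*
 * vaEndPicture entry point: finish the current frame. The target surface may
 * first be reallocated if the hardware prefers a different interlacing mode,
 * buffer format or protection state; encoders additionally emit the
 * bitstream into the coded buffer and maintain per-codec frame counters.
 */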
VAStatus
vlVaEndPicture(VADriverContextP ctx, VAContextID context_id)
{
   vlVaDriver *drv;
   vlVaContext *context;
   vlVaBuffer *coded_buf;
   vlVaSurface *surf;
   void *feedback;
   struct pipe_screen *screen;
   bool supported;
   bool realloc = false;
   enum pipe_format format;

   if (!ctx)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   drv = VL_VA_DRIVER(ctx);
   if (!drv)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   mtx_lock(&drv->mutex);
   context = handle_table_get(drv->htab, context_id);
   mtx_unlock(&drv->mutex);
   if (!context)
      return VA_STATUS_ERROR_INVALID_CONTEXT;

   if (!context->decoder) {
      if (context->templat.profile != PIPE_VIDEO_PROFILE_UNKNOWN)
         return VA_STATUS_ERROR_INVALID_CONTEXT;

      /* VPP */
      return VA_STATUS_SUCCESS;
   }

   mtx_lock(&drv->mutex);
   surf = handle_table_get(drv->htab, context->target_id);
   context->mpeg4.frame_num++;

   screen = context->decoder->context->screen;
   supported = screen->get_video_param(screen, context->decoder->profile,
                                       context->decoder->entrypoint,
                                       surf->buffer->interlaced ?
                                       PIPE_VIDEO_CAP_SUPPORTS_INTERLACED :
                                       PIPE_VIDEO_CAP_SUPPORTS_PROGRESSIVE);

   if (!supported) {
      surf->templat.interlaced = screen->get_video_param(screen,
                                       context->decoder->profile,
                                       context->decoder->entrypoint,
                                       PIPE_VIDEO_CAP_PREFERS_INTERLACED);
      realloc = true;
   }

   format = screen->get_video_param(screen, context->decoder->profile,
                                    context->decoder->entrypoint,
                                    PIPE_VIDEO_CAP_PREFERED_FORMAT);

   if (surf->buffer->buffer_format != format &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      /* Only switch to the preferred format if the surface was originally
       * allocated as NV12. */
      surf->templat.buffer_format = format;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_JPEG &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->mjpeg.sampling_factor == 0x211111 ||
          context->mjpeg.sampling_factor == 0x221212) {
         surf->templat.buffer_format = PIPE_FORMAT_YUYV;
         realloc = true;
      } else if (context->mjpeg.sampling_factor != 0x221111) {
         /* Sampling factor not representable as NV12 (or YUYV) */
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_INVALID_SURFACE;
      }
   }

   if ((bool)(surf->templat.bind & PIPE_BIND_PROTECTED) != context->desc.base.protected_playback) {
      if (context->desc.base.protected_playback)
         surf->templat.bind |= PIPE_BIND_PROTECTED;
      else
         surf->templat.bind &= ~PIPE_BIND_PROTECTED;
      realloc = true;
   }

   if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_AV1 &&
       surf->buffer->buffer_format == PIPE_FORMAT_NV12) {
      if (context->desc.av1.picture_parameter.bit_depth_idx == 1) {
         surf->templat.buffer_format = PIPE_FORMAT_P010;
         realloc = true;
      }
   }

   if (realloc) {
      struct pipe_video_buffer *old_buf = surf->buffer;

      if (vlVaHandleSurfaceAllocate(drv, surf, &surf->templat, NULL, 0) != VA_STATUS_SUCCESS) {
         mtx_unlock(&drv->mutex);
         return VA_STATUS_ERROR_ALLOCATION_FAILED;
      }

      if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
         if (old_buf->interlaced) {
            struct u_rect src_rect, dst_rect;

            dst_rect.x0 = src_rect.x0 = 0;
            dst_rect.y0 = src_rect.y0 = 0;
            dst_rect.x1 = src_rect.x1 = surf->templat.width;
            dst_rect.y1 = src_rect.y1 = surf->templat.height;
            vl_compositor_yuv_deint_full(&drv->cstate, &drv->compositor,
                                         old_buf, surf->buffer,
                                         &src_rect, &dst_rect, VL_COMPOSITOR_WEAVE);
         } else {
            /* Can't convert from progressive to interlaced yet */
            mtx_unlock(&drv->mutex);
            return VA_STATUS_ERROR_INVALID_SURFACE;
         }
      }

      old_buf->destroy(old_buf);
      context->target = surf->buffer;
   }

   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE) {
      coded_buf = context->coded_buf;
      if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
         getEncParamPresetH264(context);
         context->desc.h264enc.frame_num_cnt++;
      } else if (u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
         getEncParamPresetH265(context);

      context->desc.base.input_format = surf->buffer->buffer_format;
      context->desc.base.output_format = surf->encoder_format;

      context->decoder->begin_frame(context->decoder, context->target, &context->desc.base);
      context->decoder->encode_bitstream(context->decoder, context->target,
                                         coded_buf->derived_surface.resource, &feedback);
      surf->feedback = feedback;
      surf->coded_buf = coded_buf;
   }

   context->decoder->end_frame(context->decoder, context->target, &context->desc.base);
   if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
       u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_MPEG4_AVC) {
      int idr_period = context->desc.h264enc.gop_size / context->gop_coeff;
      int p_remain_in_idr = idr_period - context->desc.h264enc.frame_num;
      surf->frame_num_cnt = context->desc.h264enc.frame_num_cnt;
      surf->force_flushed = false;
      if (context->first_single_submitted) {
         context->decoder->flush(context->decoder);
         context->first_single_submitted = false;
         surf->force_flushed = true;
      }
      if (p_remain_in_idr == 1) {
         if ((context->desc.h264enc.frame_num_cnt % 2) != 0) {
            context->decoder->flush(context->decoder);
            context->first_single_submitted = true;
         } else
            context->first_single_submitted = false;
         surf->force_flushed = true;
      }
      if (!context->desc.h264enc.not_referenced)
         context->desc.h264enc.frame_num++;
   } else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_ENCODE &&
              u_reduce_video_profile(context->templat.profile) == PIPE_VIDEO_FORMAT_HEVC)
      context->desc.h265enc.frame_num++;
   else if (context->decoder->entrypoint == PIPE_VIDEO_ENTRYPOINT_PROCESSING &&
            context->vpp_needs_flush_on_endpic) {
      context->decoder->flush(context->decoder);
      context->vpp_needs_flush_on_endpic = false;
   }

   mtx_unlock(&drv->mutex);
   return VA_STATUS_SUCCESS;
}