1 /*
2 * Ut Video decoder
3 * Copyright (c) 2011 Konstantin Shishkov
4 *
5 * This file is part of FFmpeg.
6 *
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
11 *
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20 */
21
22 /**
23 * @file
24 * Ut Video decoder
25 */
26
27 #include <inttypes.h>
28 #include <stdlib.h>
29
30 #define CACHED_BITSTREAM_READER !ARCH_X86_32
31 #define UNCHECKED_BITSTREAM_READER 1
32
33 #include "libavutil/intreadwrite.h"
34 #include "libavutil/pixdesc.h"
35 #include "avcodec.h"
36 #include "bswapdsp.h"
37 #include "bytestream.h"
38 #include "get_bits.h"
39 #include "internal.h"
40 #include "thread.h"
41 #include "utvideo.h"
42
static int build_huff10(const uint8_t *src, VLC *vlc, int *fsym)
{
    HuffEntry he[1024];
    uint32_t codes[1024];
    uint8_t  bits[1024];
    uint16_t syms[1024];
    uint32_t cur_code;
    int idx, last;

    *fsym = -1;

    /* Gather the per-symbol code lengths transmitted in the plane header. */
    for (idx = 0; idx < 1024; idx++) {
        he[idx].sym = idx;
        he[idx].len = src[idx];
    }
    qsort(he, 1024, sizeof(*he), ff_ut10_huff_cmp_len);

    /* A zero-length shortest code marks a single-symbol plane: report the
     * fill symbol instead of building a table. */
    if (he[0].len == 0) {
        *fsym = he[0].sym;
        return 0;
    }

    /* Skip trailing entries carrying the "unused symbol" length marker (255). */
    for (last = 1023; last > 0 && he[last].len == 255; last--)
        ;

    if (he[last].len > 32)
        return -1;

    /* Assign canonical codes starting from the longest length. */
    cur_code = 1;
    for (idx = last; idx >= 0; idx--) {
        codes[idx] = cur_code >> (32 - he[idx].len);
        bits[idx]  = he[idx].len;
        syms[idx]  = he[idx].sym;
        cur_code  += 0x80000000u >> (he[idx].len - 1);
    }
#define VLC_BITS 11
    return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
                              bits, sizeof(*bits), sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms, sizeof(*syms), sizeof(*syms), 0);
}
86
static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
{
    HuffEntry he[256];
    uint32_t codes[256];
    uint8_t  bits[256];
    uint8_t  syms[256];
    uint32_t cur_code;
    int idx, last;

    *fsym = -1;

    /* Gather the per-symbol code lengths transmitted in the plane header. */
    for (idx = 0; idx < 256; idx++) {
        he[idx].sym = idx;
        he[idx].len = src[idx];
    }
    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);

    /* A zero-length shortest code marks a single-symbol plane: report the
     * fill symbol instead of building a table. */
    if (he[0].len == 0) {
        *fsym = he[0].sym;
        return 0;
    }

    /* Skip trailing entries carrying the "unused symbol" length marker (255). */
    for (last = 255; last > 0 && he[last].len == 255; last--)
        ;

    if (he[last].len > 32)
        return -1;

    /* Assign canonical codes starting from the longest length. */
    cur_code = 1;
    for (idx = last; idx >= 0; idx--) {
        codes[idx] = cur_code >> (32 - he[idx].len);
        bits[idx]  = he[idx].len;
        syms[idx]  = he[idx].sym;
        cur_code  += 0x80000000u >> (he[idx].len - 1);
    }

    return ff_init_vlc_sparse(vlc, VLC_BITS, last + 1,
                              bits, sizeof(*bits), sizeof(*bits),
                              codes, sizeof(*codes), sizeof(*codes),
                              syms, sizeof(*syms), sizeof(*syms), 0);
}
129
/* Decode one 10-bit plane, slice by slice.
 * dst/stride are in uint16_t units. huff points at the 1024-byte code-length
 * table for this plane; src points at the slice-offset table followed by the
 * compressed slice data. use_pred enables left (previous-pixel) prediction.
 * Returns 0 on success, a negative error code on failure. */
static int decode_plane10(UtvideoContext *c, int plane_no,
                          uint16_t *dst, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, const uint8_t *huff,
                          int use_pred)
{
    int i, j, slice, pix, ret;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int prev, fsym;

    if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return ret;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* Single-symbol plane: no VLC table was built, so just fill every
         * slice with fsym (optionally run through the predictor). */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint16_t *dest;

            sstart = send;
            send = (height * (slice + 1) / c->slices);
            dest = dst + sstart * stride;

            prev = 0x200; /* predictor seed: mid-point of the 10-bit range */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += pix;
                        prev &= 0x3FF; /* wrap to 10 bits */
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint16_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send = (height * (slice + 1) / c->slices);
        dest = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* Byte-swap the slice into the zero-padded scratch buffer so the
         * bitstream reader can consume it as big-endian 32-bit words. */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x200; /* predictor seed: mid-point of the 10-bit range */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    prev &= 0x3FF; /* wrap to 10 bits */
                    pix = prev;
                }
                dest[i] = pix;
            }
            dest += stride;
            /* Overread check once per row (unchecked reader inside the row). */
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
        }
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
232
compute_cmask(int plane_no,int interlaced,enum AVPixelFormat pix_fmt)233 static int compute_cmask(int plane_no, int interlaced, enum AVPixelFormat pix_fmt)
234 {
235 const int is_luma = (pix_fmt == AV_PIX_FMT_YUV420P) && !plane_no;
236
237 if (interlaced)
238 return ~(1 + 2 * is_luma);
239
240 return ~is_luma;
241 }
242
/* Decode one 8-bit plane, slice by slice.
 * Handles the packed (UMxx) variant via per-slice control/packed streams,
 * the single-symbol fast path, and the generic VLC path. src points at the
 * 256-byte code-length table followed by slice offsets and data. use_pred
 * enables left prediction. Returns 0 on success, negative error on failure. */
static int decode_plane(UtvideoContext *c, int plane_no,
                        uint8_t *dst, ptrdiff_t stride,
                        int width, int height,
                        const uint8_t *src, int use_pred)
{
    int i, j, slice, pix;
    int sstart, send;
    VLC vlc;
    GetBitContext gb;
    int ret, prev, fsym;
    const int cmask = compute_cmask(plane_no, c->interlaced, c->avctx->pix_fmt);

    if (c->pack) {
        /* Packed variant: per-slice pairs of LE bitstreams — a control
         * stream of 3-bit codes (bit width per group of 8 pixels) and a
         * packed stream holding the reduced-precision pixel values. */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            GetBitContext cbit, pbit;
            uint8_t *dest, *p;

            ret = init_get_bits8_le(&cbit, c->control_stream[plane_no][slice], c->control_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            ret = init_get_bits8_le(&pbit, c->packed_stream[plane_no][slice], c->packed_stream_size[plane_no][slice]);
            if (ret < 0)
                return ret;

            sstart = send;
            send = (height * (slice + 1) / c->slices) & cmask;
            dest = dst + sstart * stride;

            /* Each group of 8 output bytes consumes one 3-bit control code;
             * reject slices whose control stream is too short up front. */
            if (3 * ((dst + send * stride - dest + 7)/8) > get_bits_left(&cbit))
                return AVERROR_INVALIDDATA;

            for (p = dest; p < dst + send * stride; p += 8) {
                int bits = get_bits_le(&cbit, 3);

                if (bits == 0) {
                    *(uint64_t *) p = 0; /* control code 0: whole group is zero */
                } else {
                    /* bits+1 is the stored width; sub/add re-centre the
                     * (bits+1)-bit value onto the signed 8-bit range. */
                    uint32_t sub = 0x80 >> (8 - (bits + 1)), add;
                    int k;

                    if ((bits + 1) * 8 > get_bits_left(&pbit))
                        return AVERROR_INVALIDDATA;

                    for (k = 0; k < 8; k++) {

                        p[k] = get_bits_le(&pbit, bits + 1);
                        add = (~p[k] & sub) << (8 - bits);
                        p[k] -= sub;
                        p[k] += add;
                    }
                }
            }
        }

        return 0;
    }

    if (build_huff(src, &vlc, &fsym)) {
        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
        return AVERROR_INVALIDDATA;
    }
    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
        /* Single-symbol plane: no VLC table was built, so just fill every
         * slice with fsym (optionally run through the predictor). */
        send = 0;
        for (slice = 0; slice < c->slices; slice++) {
            uint8_t *dest;

            sstart = send;
            send = (height * (slice + 1) / c->slices) & cmask;
            dest = dst + sstart * stride;

            prev = 0x80; /* predictor seed: mid-point of the 8-bit range */
            for (j = sstart; j < send; j++) {
                for (i = 0; i < width; i++) {
                    pix = fsym;
                    if (use_pred) {
                        prev += (unsigned)pix; /* unsigned to avoid signed overflow UB */
                        pix = prev;
                    }
                    dest[i] = pix;
                }
                dest += stride;
            }
        }
        return 0;
    }

    src += 256; /* skip the code-length table; slice offsets follow */

    send = 0;
    for (slice = 0; slice < c->slices; slice++) {
        uint8_t *dest;
        int slice_data_start, slice_data_end, slice_size;

        sstart = send;
        send = (height * (slice + 1) / c->slices) & cmask;
        dest = dst + sstart * stride;

        // slice offset and size validation was done earlier
        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
        slice_data_end = AV_RL32(src + slice * 4);
        slice_size = slice_data_end - slice_data_start;

        if (!slice_size) {
            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
                   "yet a slice has a length of zero.\n");
            goto fail;
        }

        /* Byte-swap the slice into the zero-padded scratch buffer so the
         * bitstream reader can consume it as big-endian 32-bit words. */
        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
                          (uint32_t *)(src + slice_data_start + c->slices * 4),
                          (slice_data_end - slice_data_start + 3) >> 2);
        init_get_bits(&gb, c->slice_bits, slice_size * 8);

        prev = 0x80; /* predictor seed: mid-point of the 8-bit range */
        for (j = sstart; j < send; j++) {
            for (i = 0; i < width; i++) {
                pix = get_vlc2(&gb, vlc.table, VLC_BITS, 3);
                if (pix < 0) {
                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
                    goto fail;
                }
                if (use_pred) {
                    prev += pix;
                    pix = prev;
                }
                dest[i] = pix;
            }
            /* Overread check once per row (unchecked reader inside the row). */
            if (get_bits_left(&gb) < 0) {
                av_log(c->avctx, AV_LOG_ERROR,
                       "Slice decoding ran out of bits\n");
                goto fail;
            }
            dest += stride;
        }
        if (get_bits_left(&gb) > 32)
            av_log(c->avctx, AV_LOG_WARNING,
                   "%d bits left after decoding slice\n", get_bits_left(&gb));
    }

    ff_free_vlc(&vlc);

    return 0;
fail:
    ff_free_vlc(&vlc);
    return AVERROR_INVALIDDATA;
}
392
393 #undef A
394 #undef B
395 #undef C
396
/* Undo median prediction on a progressive plane, in place, slice by slice.
 * rmode != 0 forces slice boundaries onto even rows (used for the luma plane
 * of 4:2:0 content, see compute_cmask/decode_frame callers). */
static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                  int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride + 16,
                                        bsrc + 16, width - 16, &A, &B);

        bsrc += stride;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            /* A/B carry over across rows, making the prediction continuous. */
            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
                                        bsrc, width, &A, &B);
            bsrc += stride;
        }
    }
}
444
445 /* UtVideo interlaced mode treats every two lines as a single one,
446 * so restoring function should take care of possible padding between
447 * two parts of the same "line".
448 */
/* Interlaced variant of restore_median_planar(): each pair of field lines is
 * treated as one logical line (see the comment block above), so all vertical
 * neighbour accesses use stride2 = 2 * stride and slice heights are halved. */
static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                     int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* height in logical (paired) lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        /* second field line continues left prediction from the first */
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        // second line - first element has top prediction, the rest uses median
        C = bsrc[-stride2];
        bsrc[0] += C;
        A = bsrc[0];
        for (i = 1; i < FFMIN(width, 16); i++) { /* scalar loop (DSP need align 16) */
            B = bsrc[i - stride2];
            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
            C = B;
            A = bsrc[i];
        }
        if (width > 16)
            c->llviddsp.add_median_pred(bsrc + 16, bsrc - stride2 + 16,
                                        bsrc + 16, width - 16, &A, &B);

        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                    bsrc + stride, width, &A, &B);
        bsrc += stride2;
        // the rest of lines use continuous median prediction
        for (j = 2; j < slice_height; j++) {
            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
                                        bsrc, width, &A, &B);
            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
                                        bsrc + stride, width, &A, &B);
            bsrc += stride2;
        }
    }
}
503
/* Undo gradient prediction (pix += A - B + C, with A = top, B = top-left,
 * C = left neighbour) on a progressive plane, in place, slice by slice.
 * rmode != 0 forces slice boundaries onto even rows. */
static void restore_gradient_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                    int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~rmode;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;

        if (!slice_height)
            continue;
        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        bsrc += stride;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride];
                B = bsrc[i - (stride + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride, width - 32);
            bsrc += stride;
        }
    }
}
544
/* Interlaced variant of restore_gradient_planar(): line pairs form one
 * logical line, so the first field line of a pair predicts across stride2
 * and the second field line wraps around via the end of the first
 * (see the B index arithmetic below). */
static void restore_gradient_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                       int width, int height, int slices, int rmode)
{
    int i, j, slice;
    int A, B, C;
    uint8_t *bsrc;
    int slice_start, slice_height;
    const int cmask = ~(rmode ? 3 : 1);
    const ptrdiff_t stride2 = stride << 1;
    int min_width = FFMIN(width, 32);

    for (slice = 0; slice < slices; slice++) {
        slice_start = ((slice * height) / slices) & cmask;
        slice_height = ((((slice + 1) * height) / slices) & cmask) -
                       slice_start;
        slice_height >>= 1; /* height in logical (paired) lines */
        if (!slice_height)
            continue;

        bsrc = src + slice_start * stride;

        // first line - left neighbour prediction
        bsrc[0] += 0x80;
        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
        bsrc += stride2;
        if (slice_height <= 1)
            continue;
        for (j = 1; j < slice_height; j++) {
            // second line - first element has top prediction, the rest uses gradient
            bsrc[0] = (bsrc[0] + bsrc[-stride2]) & 0xFF;
            for (i = 1; i < min_width; i++) { /* dsp need align 32 */
                A = bsrc[i - stride2];
                B = bsrc[i - (stride2 + 1)];
                C = bsrc[i - 1];
                bsrc[i] = (A - B + C + bsrc[i]) & 0xFF;
            }
            if (width > 32)
                c->llviddsp.add_gradient_pred(bsrc + 32, stride2, width - 32);

            /* second field line of the pair: its "top" row is the first
             * field line, and its top-left for column 0 wraps to the end of
             * the previous logical line. */
            A = bsrc[-stride];
            B = bsrc[-(1 + stride + stride - width)];
            C = bsrc[width - 1];
            bsrc[stride] = (A - B + C + bsrc[stride]) & 0xFF;
            for (i = 1; i < width; i++) {
                A = bsrc[i - stride];
                B = bsrc[i - (1 + stride)];
                C = bsrc[i - 1 + stride];
                bsrc[i + stride] = (A - B + C + bsrc[i + stride]) & 0xFF;
            }
            bsrc += stride2;
        }
    }
}
599
/* Decode one Ut Video frame: validate the plane/slice layout, then decode
 * each plane and undo the frame-level prediction according to pix_fmt.
 * Returns buf_size on success (the full packet is always consumed), or a
 * negative error code. */
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    UtvideoContext *c = avctx->priv_data;
    int i, j;
    const uint8_t *plane_start[5];
    int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
    int ret;
    GetByteContext gb;
    ThreadFrame frame = { .f = data };

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    /* parse plane structure to get frame flags and validate slice offsets */
    bytestream2_init(&gb, buf, buf_size);

    if (c->pack) {
        /* Packed (UMxx) layout: a header, then a packed-stream area followed
         * by a control-stream area; per-slice sizes are read from a table at
         * offset c->offset and validated against the remaining space. */
        const uint8_t *packed_stream;
        const uint8_t *control_stream;
        GetByteContext pb;
        uint32_t nb_cbs;
        int left;

        c->frame_info = PRED_GRADIENT << 8; /* packed mode always uses gradient prediction */

        if (bytestream2_get_byte(&gb) != 1)
            return AVERROR_INVALIDDATA;
        bytestream2_skip(&gb, 3);
        c->offset = bytestream2_get_le32(&gb);

        if (buf_size <= c->offset + 8LL)
            return AVERROR_INVALIDDATA;

        bytestream2_init(&pb, buf + 8 + c->offset, buf_size - 8 - c->offset);

        nb_cbs = bytestream2_get_le32(&pb);
        if (nb_cbs > c->offset)
            return AVERROR_INVALIDDATA;

        packed_stream = buf + 8;
        control_stream = packed_stream + (c->offset - nb_cbs);
        left = control_stream - packed_stream;

        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->packed_stream[i][j] = packed_stream;
                c->packed_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->packed_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->packed_stream_size[i][j];
                packed_stream += c->packed_stream_size[i][j];
            }
        }

        left = buf + buf_size - control_stream;

        for (i = 0; i < c->planes; i++) {
            for (j = 0; j < c->slices; j++) {
                c->control_stream[i][j] = control_stream;
                c->control_stream_size[i][j] = bytestream2_get_le32(&pb);
                if (c->control_stream_size[i][j] > left)
                    return AVERROR_INVALIDDATA;
                left -= c->control_stream_size[i][j];
                control_stream += c->control_stream_size[i][j];
            }
        }
    } else if (c->pro) {
        /* Pro (UQxx) layout: frame info first, then per plane a slice-offset
         * table, the slice data and a trailing 1024-byte Huffman table. */
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
        c->slices = ((c->frame_info >> 16) & 0xff) + 1;
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 1024 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            slice_start = 0;
            slice_end = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end + 1024LL) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
            bytestream2_skipu(&gb, 1024); /* skip the Huffman length table */
        }
        plane_start[c->planes] = gb.buffer;
    } else {
        /* Legacy (ULxx) layout: per plane a 256-byte Huffman length table,
         * slice offsets and slice data; frame info comes last. */
        for (i = 0; i < c->planes; i++) {
            plane_start[i] = gb.buffer;
            if (bytestream2_get_bytes_left(&gb) < 256 + 4 * c->slices) {
                av_log(avctx, AV_LOG_ERROR, "Insufficient data for a plane\n");
                return AVERROR_INVALIDDATA;
            }
            bytestream2_skipu(&gb, 256);
            slice_start = 0;
            slice_end = 0;
            for (j = 0; j < c->slices; j++) {
                slice_end = bytestream2_get_le32u(&gb);
                if (slice_end < 0 || slice_end < slice_start ||
                    bytestream2_get_bytes_left(&gb) < slice_end) {
                    av_log(avctx, AV_LOG_ERROR, "Incorrect slice size\n");
                    return AVERROR_INVALIDDATA;
                }
                slice_size = slice_end - slice_start;
                slice_start = slice_end;
                max_slice_size = FFMAX(max_slice_size, slice_size);
            }
            plane_size = slice_end;
            bytestream2_skipu(&gb, plane_size);
        }
        plane_start[c->planes] = gb.buffer;
        if (bytestream2_get_bytes_left(&gb) < c->frame_info_size) {
            av_log(avctx, AV_LOG_ERROR, "Not enough data for frame information\n");
            return AVERROR_INVALIDDATA;
        }
        c->frame_info = bytestream2_get_le32u(&gb);
    }
    av_log(avctx, AV_LOG_DEBUG, "frame information flags %"PRIX32"\n",
           c->frame_info);

    c->frame_pred = (c->frame_info >> 8) & 3;

    max_slice_size += 4*avctx->width;

    if (!c->pack) {
        /* Scratch buffer for the byte-swapped slice bitstream. */
        av_fast_malloc(&c->slice_bits, &c->slice_bits_size,
                       max_slice_size + AV_INPUT_BUFFER_PADDING_SIZE);

        if (!c->slice_bits) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer\n");
            return AVERROR(ENOMEM);
        }
    }

    /* Decode per plane, then undo median/gradient prediction where needed
     * (left prediction is handled inside decode_plane/decode_plane10). */
    switch (c->avctx->pix_fmt) {
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GBRAP:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane(c, i, frame.f->data[i],
                               frame.f->linesize[i], avctx->width,
                               avctx->height, plane_start[i],
                               c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i],
                                          frame.f->linesize[i], avctx->width,
                                          avctx->height, c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame.f->data[i],
                                             frame.f->linesize[i],
                                             avctx->width, avctx->height, c->slices,
                                             0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i],
                                            frame.f->linesize[i], avctx->width,
                                            avctx->height, c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i],
                                               frame.f->linesize[i],
                                               avctx->width, avctx->height, c->slices,
                                               0);
                }
            }
        }
        /* Ut Video RGB is stored with an offset; convert back to plain GBR. */
        c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
                                    frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
                                    avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_GBRAP10:
    case AV_PIX_FMT_GBRP10:
        for (i = 0; i < c->planes; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
                                 frame.f->linesize[i] / 2, avctx->width,
                                 avctx->height, plane_start[i],
                                 plane_start[i + 1] - 1024,
                                 c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
                                      frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
                                      avctx->width, avctx->height);
        break;
    case AV_PIX_FMT_YUV420P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height >> !!i,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height >> !!i,
                                          c->slices, !i);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width >> !!i,
                                             avctx->height >> !!i,
                                             c->slices, !i);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i, avctx->height >> !!i,
                                            c->slices, !i);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width >> !!i,
                                               avctx->height >> !!i,
                                               c->slices, !i);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV422P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width >> !!i, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width >> !!i, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width >> !!i, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width >> !!i, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width >> !!i, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV444P:
        for (i = 0; i < 3; i++) {
            ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
                               avctx->width, avctx->height,
                               plane_start[i], c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
            if (c->frame_pred == PRED_MEDIAN) {
                if (!c->interlaced) {
                    restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
                                          avctx->width, avctx->height,
                                          c->slices, 0);
                } else {
                    restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                             avctx->width, avctx->height,
                                             c->slices, 0);
                }
            } else if (c->frame_pred == PRED_GRADIENT) {
                if (!c->interlaced) {
                    restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
                                            avctx->width, avctx->height,
                                            c->slices, 0);
                } else {
                    restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
                                               avctx->width, avctx->height,
                                               c->slices, 0);
                }
            }
        }
        break;
    case AV_PIX_FMT_YUV420P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height >> !!i,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        break;
    case AV_PIX_FMT_YUV422P10:
        for (i = 0; i < 3; i++) {
            ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
                                 avctx->width >> !!i, avctx->height,
                                 plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
            if (ret)
                return ret;
        }
        break;
    }

    frame.f->key_frame = 1;
    frame.f->pict_type = AV_PICTURE_TYPE_I;
    frame.f->interlaced_frame = !!c->interlaced;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
922
decode_init(AVCodecContext * avctx)923 static av_cold int decode_init(AVCodecContext *avctx)
924 {
925 UtvideoContext * const c = avctx->priv_data;
926 int h_shift, v_shift;
927
928 c->avctx = avctx;
929
930 ff_utvideodsp_init(&c->utdsp);
931 ff_bswapdsp_init(&c->bdsp);
932 ff_llviddsp_init(&c->llviddsp);
933
934 c->slice_bits_size = 0;
935
936 switch (avctx->codec_tag) {
937 case MKTAG('U', 'L', 'R', 'G'):
938 c->planes = 3;
939 avctx->pix_fmt = AV_PIX_FMT_GBRP;
940 break;
941 case MKTAG('U', 'L', 'R', 'A'):
942 c->planes = 4;
943 avctx->pix_fmt = AV_PIX_FMT_GBRAP;
944 break;
945 case MKTAG('U', 'L', 'Y', '0'):
946 c->planes = 3;
947 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
948 avctx->colorspace = AVCOL_SPC_BT470BG;
949 break;
950 case MKTAG('U', 'L', 'Y', '2'):
951 c->planes = 3;
952 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
953 avctx->colorspace = AVCOL_SPC_BT470BG;
954 break;
955 case MKTAG('U', 'L', 'Y', '4'):
956 c->planes = 3;
957 avctx->pix_fmt = AV_PIX_FMT_YUV444P;
958 avctx->colorspace = AVCOL_SPC_BT470BG;
959 break;
960 case MKTAG('U', 'Q', 'Y', '0'):
961 c->planes = 3;
962 c->pro = 1;
963 avctx->pix_fmt = AV_PIX_FMT_YUV420P10;
964 break;
965 case MKTAG('U', 'Q', 'Y', '2'):
966 c->planes = 3;
967 c->pro = 1;
968 avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
969 break;
970 case MKTAG('U', 'Q', 'R', 'G'):
971 c->planes = 3;
972 c->pro = 1;
973 avctx->pix_fmt = AV_PIX_FMT_GBRP10;
974 break;
975 case MKTAG('U', 'Q', 'R', 'A'):
976 c->planes = 4;
977 c->pro = 1;
978 avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
979 break;
980 case MKTAG('U', 'L', 'H', '0'):
981 c->planes = 3;
982 avctx->pix_fmt = AV_PIX_FMT_YUV420P;
983 avctx->colorspace = AVCOL_SPC_BT709;
984 break;
985 case MKTAG('U', 'L', 'H', '2'):
986 c->planes = 3;
987 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
988 avctx->colorspace = AVCOL_SPC_BT709;
989 break;
990 case MKTAG('U', 'L', 'H', '4'):
991 c->planes = 3;
992 avctx->pix_fmt = AV_PIX_FMT_YUV444P;
993 avctx->colorspace = AVCOL_SPC_BT709;
994 break;
995 case MKTAG('U', 'M', 'Y', '2'):
996 c->planes = 3;
997 c->pack = 1;
998 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
999 avctx->colorspace = AVCOL_SPC_BT470BG;
1000 break;
1001 case MKTAG('U', 'M', 'H', '2'):
1002 c->planes = 3;
1003 c->pack = 1;
1004 avctx->pix_fmt = AV_PIX_FMT_YUV422P;
1005 avctx->colorspace = AVCOL_SPC_BT709;
1006 break;
1007 case MKTAG('U', 'M', 'Y', '4'):
1008 c->planes = 3;
1009 c->pack = 1;
1010 avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1011 avctx->colorspace = AVCOL_SPC_BT470BG;
1012 break;
1013 case MKTAG('U', 'M', 'H', '4'):
1014 c->planes = 3;
1015 c->pack = 1;
1016 avctx->pix_fmt = AV_PIX_FMT_YUV444P;
1017 avctx->colorspace = AVCOL_SPC_BT709;
1018 break;
1019 case MKTAG('U', 'M', 'R', 'G'):
1020 c->planes = 3;
1021 c->pack = 1;
1022 avctx->pix_fmt = AV_PIX_FMT_GBRP;
1023 break;
1024 case MKTAG('U', 'M', 'R', 'A'):
1025 c->planes = 4;
1026 c->pack = 1;
1027 avctx->pix_fmt = AV_PIX_FMT_GBRAP;
1028 break;
1029 default:
1030 av_log(avctx, AV_LOG_ERROR, "Unknown Ut Video FOURCC provided (%08X)\n",
1031 avctx->codec_tag);
1032 return AVERROR_INVALIDDATA;
1033 }
1034
1035 av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &h_shift, &v_shift);
1036 if ((avctx->width & ((1<<h_shift)-1)) ||
1037 (avctx->height & ((1<<v_shift)-1))) {
1038 avpriv_request_sample(avctx, "Odd dimensions");
1039 return AVERROR_PATCHWELCOME;
1040 }
1041
1042 if (c->pack && avctx->extradata_size >= 16) {
1043 av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1044 avctx->extradata[3], avctx->extradata[2],
1045 avctx->extradata[1], avctx->extradata[0]);
1046 av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1047 AV_RB32(avctx->extradata + 4));
1048 c->compression = avctx->extradata[8];
1049 if (c->compression != 2)
1050 avpriv_request_sample(avctx, "Unknown compression type");
1051 c->slices = avctx->extradata[9] + 1;
1052 } else if (!c->pro && avctx->extradata_size >= 16) {
1053 av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1054 avctx->extradata[3], avctx->extradata[2],
1055 avctx->extradata[1], avctx->extradata[0]);
1056 av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1057 AV_RB32(avctx->extradata + 4));
1058 c->frame_info_size = AV_RL32(avctx->extradata + 8);
1059 c->flags = AV_RL32(avctx->extradata + 12);
1060
1061 if (c->frame_info_size != 4)
1062 avpriv_request_sample(avctx, "Frame info not 4 bytes");
1063 av_log(avctx, AV_LOG_DEBUG, "Encoding parameters %08"PRIX32"\n", c->flags);
1064 c->slices = (c->flags >> 24) + 1;
1065 c->compression = c->flags & 1;
1066 c->interlaced = c->flags & 0x800;
1067 } else if (c->pro && avctx->extradata_size == 8) {
1068 av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d.%d.%d\n",
1069 avctx->extradata[3], avctx->extradata[2],
1070 avctx->extradata[1], avctx->extradata[0]);
1071 av_log(avctx, AV_LOG_DEBUG, "Original format %"PRIX32"\n",
1072 AV_RB32(avctx->extradata + 4));
1073 c->interlaced = 0;
1074 c->frame_info_size = 4;
1075 } else {
1076 av_log(avctx, AV_LOG_ERROR,
1077 "Insufficient extradata size %d, should be at least 16\n",
1078 avctx->extradata_size);
1079 return AVERROR_INVALIDDATA;
1080 }
1081
1082 return 0;
1083 }
1084
decode_end(AVCodecContext * avctx)1085 static av_cold int decode_end(AVCodecContext *avctx)
1086 {
1087 UtvideoContext * const c = avctx->priv_data;
1088
1089 av_freep(&c->slice_bits);
1090
1091 return 0;
1092 }
1093
/* Decoder registration. Covers every Ut Video FOURCC handled by
 * decode_init(); direct rendering and frame threading are supported. */
AVCodec ff_utvideo_decoder = {
    .name           = "utvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("Ut Video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_UTVIDEO,
    .priv_data_size = sizeof(UtvideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
1106