1 /*
2 * generic decoding-related code
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 #include <stdint.h>
22 #include <string.h>
23
24 #include "config.h"
25
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 #include "libavutil/opt.h"
40
41 #include "avcodec.h"
42 #include "bytestream.h"
43 #include "decode.h"
44 #include "hwconfig.h"
45 #include "internal.h"
46 #include "thread.h"
47
48 typedef struct FramePool {
49 /**
50 * Pools for each data plane. For audio all the planes have the same size,
51 * so only pools[0] is used.
52 */
53 AVBufferPool *pools[4];
54
55 /*
56 * Pool parameters
57 */
58 int format;
59 int width, height;
60 int stride_align[AV_NUM_DATA_POINTERS];
61 int linesize[4];
62 int planes;
63 int channels;
64 int samples;
65 } FramePool;
66
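/**
* Apply AV_PKT_DATA_PARAM_CHANGE side data from the packet to the codec
* context (channel count, channel layout, sample rate and dimensions).
* Invalid or truncated side data is only fatal when AV_EF_EXPLODE is set
* in err_recognition.
*/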
67 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
68 {
69 int size = 0, ret;
70 const uint8_t *data;
71 uint32_t flags;
72 int64_t val;
73
74 data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
75 if (!data)
76 return 0;
77
78 if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
79 av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
80 "changes, but PARAM_CHANGE side data was sent to it.\n");
81 ret = AVERROR(EINVAL);
82 goto fail2;
83 }
84
85 if (size < 4)
86 goto fail;
87
88 flags = bytestream_get_le32(&data);
89 size -= 4;
90
91 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
92 if (size < 4)
93 goto fail;
94 val = bytestream_get_le32(&data);
95 if (val <= 0 || val > INT_MAX) {
96 av_log(avctx, AV_LOG_ERROR, "Invalid channel count\n");
97 ret = AVERROR_INVALIDDATA;
98 goto fail2;
99 }
100 avctx->channels = val;
101 size -= 4;
102 }
103 if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
104 if (size < 8)
105 goto fail;
106 avctx->channel_layout = bytestream_get_le64(&data);
107 size -= 8;
108 }
109 if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
110 if (size < 4)
111 goto fail;
112 val = bytestream_get_le32(&data);
113 if (val <= 0 || val > INT_MAX) {
114 av_log(avctx, AV_LOG_ERROR, "Invalid sample rate\n");
115 ret = AVERROR_INVALIDDATA;
116 goto fail2;
117 }
118 avctx->sample_rate = val;
119 size -= 4;
120 }
121 if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
122 if (size < 8)
123 goto fail;
124 avctx->width = bytestream_get_le32(&data);
125 avctx->height = bytestream_get_le32(&data);
126 size -= 8;
127 ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
128 if (ret < 0)
129 goto fail2;
130 }
131
132 return 0;
133 fail:
134 av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
135 ret = AVERROR_INVALIDDATA;
136 fail2:
137 if (ret < 0) {
138 av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
139 if (avctx->err_recognition & AV_EF_EXPLODE)
140 return ret;
141 }
142 return 0;
143 }
144
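/**
* Store a copy of the packet properties in avci->last_pkt_props, so that
* ff_decode_frame_props() can later attach them to the output frame.
*/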
145 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
146 {
147 int ret = 0;
148
149 av_packet_unref(avci->last_pkt_props);
150 if (pkt) {
151 ret = av_packet_copy_props(avci->last_pkt_props, pkt);
152 if (!ret)
153 avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
154 }
155 return ret;
156 }
157
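/**
* Turn a refcounted frame into an unrefcounted one for the legacy
* refcounted_frames=0 API: the buffer references are moved to avci->to_free
* and the caller's frame keeps only plain data pointers and properties.
*/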
158 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
159 {
160 int ret;
161
162 /* move the original frame to our backup */
163 av_frame_unref(avci->to_free);
164 av_frame_move_ref(avci->to_free, frame);
165
166 /* now copy everything except the AVBufferRefs back
167 * note that we make a COPY of the side data, so calling av_frame_free() on
168 * the caller's frame will work properly */
169 ret = av_frame_copy_props(frame, avci->to_free);
170 if (ret < 0)
171 return ret;
172
173 memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
174 memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
175 if (avci->to_free->extended_data != avci->to_free->data) {
176 int planes = avci->to_free->channels;
177 int size = planes * sizeof(*frame->extended_data);
178
179 if (!size) {
180 av_frame_unref(frame);
181 return AVERROR_BUG;
182 }
183
184 frame->extended_data = av_malloc(size);
185 if (!frame->extended_data) {
186 av_frame_unref(frame);
187 return AVERROR(ENOMEM);
188 }
189 memcpy(frame->extended_data, avci->to_free->extended_data,
190 size);
191 } else
192 frame->extended_data = frame->data;
193
194 frame->format = avci->to_free->format;
195 frame->width = avci->to_free->width;
196 frame->height = avci->to_free->height;
197 frame->channel_layout = avci->to_free->channel_layout;
198 frame->nb_samples = avci->to_free->nb_samples;
199 frame->channels = avci->to_free->channels;
200
201 return 0;
202 }
203
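/**
* Initialize the bitstream filter chain declared by the decoder in
* codec->bsfs and feed it the current codec parameters. Does nothing if the
* chain has already been created.
*/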
204 int ff_decode_bsfs_init(AVCodecContext *avctx)
205 {
206 AVCodecInternal *avci = avctx->internal;
207 int ret;
208
209 if (avci->bsf)
210 return 0;
211
212 ret = av_bsf_list_parse_str(avctx->codec->bsfs, &avci->bsf);
213 if (ret < 0) {
214 av_log(avctx, AV_LOG_ERROR, "Error parsing decoder bitstream filters '%s': %s\n", avctx->codec->bsfs, av_err2str(ret));
215 if (ret != AVERROR(ENOMEM))
216 ret = AVERROR_BUG;
217 goto fail;
218 }
219
220 /* We do not currently have an API for passing the input timebase into decoders,
221 * but no filters used here should actually need it.
222 * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
223 avci->bsf->time_base_in = (AVRational){ 1, 90000 };
224 ret = avcodec_parameters_from_context(avci->bsf->par_in, avctx);
225 if (ret < 0)
226 goto fail;
227
228 ret = av_bsf_init(avci->bsf);
229 if (ret < 0)
230 goto fail;
231
232 return 0;
233 fail:
234 av_bsf_free(&avci->bsf);
235 return ret;
236 }
237
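/**
* Fetch the next filtered packet to be decoded from the bsf chain, record
* its properties and apply any parameter-change side data. Sets
* avci->draining and returns AVERROR_EOF once the input is exhausted.
*/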
238 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
239 {
240 AVCodecInternal *avci = avctx->internal;
241 int ret;
242
243 if (avci->draining)
244 return AVERROR_EOF;
245
246 ret = av_bsf_receive_packet(avci->bsf, pkt);
247 if (ret == AVERROR_EOF)
248 avci->draining = 1;
249 if (ret < 0)
250 return ret;
251
252 ret = extract_packet_props(avctx->internal, pkt);
253 if (ret < 0)
254 goto finish;
255
256 ret = apply_param_change(avctx, pkt);
257 if (ret < 0)
258 goto finish;
259
260 if (avctx->codec->receive_frame)
261 avci->compat_decode_consumed += pkt->size;
262
263 return 0;
264 finish:
265 av_packet_unref(pkt);
266 return ret;
267 }
268
269 /**
270 * Attempt to guess proper monotonic timestamps for decoded video frames
271 * which might have incorrect times. Input timestamps may wrap around, in
272 * which case the output will as well.
273 *
274 * @param pts the pts field of the decoded AVPacket, as passed through
275 * AVFrame.pts
276 * @param dts the dts field of the decoded AVPacket
277 * @return one of the input values, may be AV_NOPTS_VALUE
278 */
279 static int64_t guess_correct_pts(AVCodecContext *ctx,
280 int64_t reordered_pts, int64_t dts)
281 {
282 int64_t pts = AV_NOPTS_VALUE;
283
284 if (dts != AV_NOPTS_VALUE) {
285 ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
286 ctx->pts_correction_last_dts = dts;
287 } else if (reordered_pts != AV_NOPTS_VALUE)
288 ctx->pts_correction_last_dts = reordered_pts;
289
290 if (reordered_pts != AV_NOPTS_VALUE) {
291 ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
292 ctx->pts_correction_last_pts = reordered_pts;
293 } else if(dts != AV_NOPTS_VALUE)
294 ctx->pts_correction_last_pts = dts;
295
296 if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
297 && reordered_pts != AV_NOPTS_VALUE)
298 pts = reordered_pts;
299 else
300 pts = dts;
301
302 return pts;
303 }
304
305 /*
306 * The core of the receive_frame_wrapper for the decoders implementing
307 * the simple API. Certain decoders might consume partial packets without
308 * returning any output, so this function needs to be called in a loop until it
309 * returns EAGAIN.
310 **/
311 static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
312 {
313 AVCodecInternal *avci = avctx->internal;
314 DecodeSimpleContext *ds = &avci->ds;
315 AVPacket *pkt = ds->in_pkt;
316 // copy to ensure we do not change pkt
317 int got_frame, actual_got_frame;
318 int ret;
319
320 if (!pkt->data && !avci->draining) {
321 av_packet_unref(pkt);
322 ret = ff_decode_get_packet(avctx, pkt);
323 if (ret < 0 && ret != AVERROR_EOF)
324 return ret;
325 }
326
327 // Some codecs (at least wma lossless) will crash when feeding drain packets
328 // after EOF was signaled.
329 if (avci->draining_done)
330 return AVERROR_EOF;
331
332 if (!pkt->data &&
333 !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
334 avctx->active_thread_type & FF_THREAD_FRAME))
335 return AVERROR_EOF;
336
337 got_frame = 0;
338
339 if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
340 ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
341 } else {
342 ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
343
344 if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
345 frame->pkt_dts = pkt->dts;
346 if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
347 if(!avctx->has_b_frames)
348 frame->pkt_pos = pkt->pos;
349 //FIXME these should be under if(!avctx->has_b_frames)
350 /* get_buffer is supposed to set frame parameters */
351 if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
352 if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
353 if (!frame->width) frame->width = avctx->width;
354 if (!frame->height) frame->height = avctx->height;
355 if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
356 }
357 }
358 }
359 emms_c();
360 actual_got_frame = got_frame;
361
362 if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
363 if (frame->flags & AV_FRAME_FLAG_DISCARD)
364 got_frame = 0;
365 if (got_frame)
366 frame->best_effort_timestamp = guess_correct_pts(avctx,
367 frame->pts,
368 frame->pkt_dts);
369 } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
370 uint8_t *side;
371 int side_size;
372 uint32_t discard_padding = 0;
373 uint8_t skip_reason = 0;
374 uint8_t discard_reason = 0;
375
376 if (ret >= 0 && got_frame) {
377 frame->best_effort_timestamp = guess_correct_pts(avctx,
378 frame->pts,
379 frame->pkt_dts);
380 if (frame->format == AV_SAMPLE_FMT_NONE)
381 frame->format = avctx->sample_fmt;
382 if (!frame->channel_layout)
383 frame->channel_layout = avctx->channel_layout;
384 if (!frame->channels)
385 frame->channels = avctx->channels;
386 if (!frame->sample_rate)
387 frame->sample_rate = avctx->sample_rate;
388 }
389
390 side= av_packet_get_side_data(avci->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
391 if(side && side_size>=10) {
392 avci->skip_samples = AV_RL32(side) * avci->skip_samples_multiplier;
393 discard_padding = AV_RL32(side + 4);
394 av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
395 avci->skip_samples, (int)discard_padding);
396 skip_reason = AV_RL8(side + 8);
397 discard_reason = AV_RL8(side + 9);
398 }
399
400 if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
401 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
402 avci->skip_samples = FFMAX(0, avci->skip_samples - frame->nb_samples);
403 got_frame = 0;
404 }
405
406 if (avci->skip_samples > 0 && got_frame &&
407 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
408 if(frame->nb_samples <= avci->skip_samples){
409 got_frame = 0;
410 avci->skip_samples -= frame->nb_samples;
411 av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
412 avci->skip_samples);
413 } else {
414 av_samples_copy(frame->extended_data, frame->extended_data, 0, avci->skip_samples,
415 frame->nb_samples - avci->skip_samples, avctx->channels, frame->format);
416 if(avctx->pkt_timebase.num && avctx->sample_rate) {
417 int64_t diff_ts = av_rescale_q(avci->skip_samples,
418 (AVRational){1, avctx->sample_rate},
419 avctx->pkt_timebase);
420 if(frame->pts!=AV_NOPTS_VALUE)
421 frame->pts += diff_ts;
422 #if FF_API_PKT_PTS
423 FF_DISABLE_DEPRECATION_WARNINGS
424 if(frame->pkt_pts!=AV_NOPTS_VALUE)
425 frame->pkt_pts += diff_ts;
426 FF_ENABLE_DEPRECATION_WARNINGS
427 #endif
428 if(frame->pkt_dts!=AV_NOPTS_VALUE)
429 frame->pkt_dts += diff_ts;
430 if (frame->pkt_duration >= diff_ts)
431 frame->pkt_duration -= diff_ts;
432 } else {
433 av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
434 }
435 av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
436 avci->skip_samples, frame->nb_samples);
437 frame->nb_samples -= avci->skip_samples;
438 avci->skip_samples = 0;
439 }
440 }
441
442 if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
443 !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
444 if (discard_padding == frame->nb_samples) {
445 got_frame = 0;
446 } else {
447 if(avctx->pkt_timebase.num && avctx->sample_rate) {
448 int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
449 (AVRational){1, avctx->sample_rate},
450 avctx->pkt_timebase);
451 frame->pkt_duration = diff_ts;
452 } else {
453 av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
454 }
455 av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
456 (int)discard_padding, frame->nb_samples);
457 frame->nb_samples -= discard_padding;
458 }
459 }
460
461 if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
462 AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
463 if (fside) {
464 AV_WL32(fside->data, avci->skip_samples);
465 AV_WL32(fside->data + 4, discard_padding);
466 AV_WL8(fside->data + 8, skip_reason);
467 AV_WL8(fside->data + 9, discard_reason);
468 avci->skip_samples = 0;
469 }
470 }
471 }
472
473 if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
474 !avci->showed_multi_packet_warning &&
475 ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
476 av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
477 avci->showed_multi_packet_warning = 1;
478 }
479
480 if (!got_frame)
481 av_frame_unref(frame);
482
483 if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
484 ret = pkt->size;
485
486 #if FF_API_AVCTX_TIMEBASE
487 if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
488 avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
489 #endif
490
491 /* do not stop draining when actual_got_frame != 0 or ret < 0 */
492 /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
493 if (avci->draining && !actual_got_frame) {
494 if (ret < 0) {
495 /* prevent infinite loop if a decoder wrongly always return error on draining */
496 /* reasonable nb_errors_max = maximum b frames + thread count */
497 int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
498 avctx->thread_count : 1);
499
500 if (avci->nb_draining_errors++ >= nb_errors_max) {
501 av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
502 "Stop draining and force EOF.\n");
503 avci->draining_done = 1;
504 ret = AVERROR_BUG;
505 }
506 } else {
507 avci->draining_done = 1;
508 }
509 }
510
511 avci->compat_decode_consumed += ret;
512
513 if (ret >= pkt->size || ret < 0) {
514 av_packet_unref(pkt);
515 } else {
516 int consumed = ret;
517
518 pkt->data += consumed;
519 pkt->size -= consumed;
520 avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
521 pkt->pts = AV_NOPTS_VALUE;
522 pkt->dts = AV_NOPTS_VALUE;
523 avci->last_pkt_props->pts = AV_NOPTS_VALUE;
524 avci->last_pkt_props->dts = AV_NOPTS_VALUE;
525 }
526
527 if (got_frame)
528 av_assert0(frame->buf[0]);
529
530 return ret < 0 ? ret : 0;
531 }
532
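/* Keep calling decode_simple_internal() until it either produces a frame
* (frame->buf[0] is set) or returns an error such as AVERROR(EAGAIN). */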
533 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
534 {
535 int ret;
536
537 while (!frame->buf[0]) {
538 ret = decode_simple_internal(avctx, frame);
539 if (ret < 0)
540 return ret;
541 }
542
543 return 0;
544 }
545
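/* Common frame-receive path: use the codec's receive_frame() callback when
* present, otherwise fall back to the simple decode API, then run the
* optional per-frame post_process() callback attached by
* ff_attach_decode_data(). */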
546 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
547 {
548 AVCodecInternal *avci = avctx->internal;
549 int ret;
550
551 av_assert0(!frame->buf[0]);
552
553 if (avctx->codec->receive_frame)
554 ret = avctx->codec->receive_frame(avctx, frame);
555 else
556 ret = decode_simple_receive_frame(avctx, frame);
557
558 if (ret == AVERROR_EOF)
559 avci->draining_done = 1;
560
561 if (!ret) {
562 /* the only case where decode data is not set should be decoders
563 * that do not call ff_get_buffer() */
564 av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
565 !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
566
567 if (frame->private_ref) {
568 FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
569
570 if (fdd->post_process) {
571 ret = fdd->post_process(avctx, frame);
572 if (ret < 0) {
573 av_frame_unref(frame);
574 return ret;
575 }
576 }
577 }
578 }
579
580 /* free the per-frame decode data */
581 av_buffer_unref(&frame->private_ref);
582
583 return ret;
584 }
585
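/**
* Typical use of the send/receive decoding API (a minimal sketch; error
* handling and flushing with a NULL packet are omitted for brevity):
* @code
* while (av_read_frame(fmt_ctx, pkt) >= 0) {
*     if (avcodec_send_packet(dec_ctx, pkt) >= 0) {
*         while (avcodec_receive_frame(dec_ctx, frame) >= 0) {
*             // process the decoded frame here
*             av_frame_unref(frame);
*         }
*     }
*     av_packet_unref(pkt);
* }
* @endcode
*/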
586 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
587 {
588 AVCodecInternal *avci = avctx->internal;
589 int ret;
590
591 if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
592 return AVERROR(EINVAL);
593
594 if (avctx->internal->draining)
595 return AVERROR_EOF;
596
597 if (avpkt && !avpkt->size && avpkt->data)
598 return AVERROR(EINVAL);
599
600 av_packet_unref(avci->buffer_pkt);
601 if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
602 ret = av_packet_ref(avci->buffer_pkt, avpkt);
603 if (ret < 0)
604 return ret;
605 }
606
607 ret = av_bsf_send_packet(avci->bsf, avci->buffer_pkt);
608 if (ret < 0) {
609 av_packet_unref(avci->buffer_pkt);
610 return ret;
611 }
612
613 if (!avci->buffer_frame->buf[0]) {
614 ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
615 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
616 return ret;
617 }
618
619 return 0;
620 }
621
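/* Validate the cropping values exported by the decoder and, if
* avctx->apply_cropping is set, crop the frame in place. Invalid values are
* reported and reset to zero instead of failing. */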
622 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
623 {
624 /* make sure we are noisy about decoders returning invalid cropping data */
625 if (frame->crop_left >= INT_MAX - frame->crop_right ||
626 frame->crop_top >= INT_MAX - frame->crop_bottom ||
627 (frame->crop_left + frame->crop_right) >= frame->width ||
628 (frame->crop_top + frame->crop_bottom) >= frame->height) {
629 av_log(avctx, AV_LOG_WARNING,
630 "Invalid cropping information set by a decoder: "
631 "%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER"/%"SIZE_SPECIFIER" "
632 "(frame size %dx%d). This is a bug, please report it\n",
633 frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
634 frame->width, frame->height);
635 frame->crop_left = 0;
636 frame->crop_right = 0;
637 frame->crop_top = 0;
638 frame->crop_bottom = 0;
639 return 0;
640 }
641
642 if (!avctx->apply_cropping)
643 return 0;
644
645 return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
646 AV_FRAME_CROP_UNALIGNED : 0);
647 }
648
649 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
650 {
651 AVCodecInternal *avci = avctx->internal;
652 int ret, changed;
653
654 av_frame_unref(frame);
655
656 if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
657 return AVERROR(EINVAL);
658
659 if (avci->buffer_frame->buf[0]) {
660 av_frame_move_ref(frame, avci->buffer_frame);
661 } else {
662 ret = decode_receive_frame_internal(avctx, frame);
663 if (ret < 0)
664 return ret;
665 }
666
667 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
668 ret = apply_cropping(avctx, frame);
669 if (ret < 0) {
670 av_frame_unref(frame);
671 return ret;
672 }
673 }
674
675 avctx->frame_number++;
676
677 if (avctx->flags & AV_CODEC_FLAG_DROPCHANGED) {
678
679 if (avctx->frame_number == 1) {
680 avci->initial_format = frame->format;
681 switch(avctx->codec_type) {
682 case AVMEDIA_TYPE_VIDEO:
683 avci->initial_width = frame->width;
684 avci->initial_height = frame->height;
685 break;
686 case AVMEDIA_TYPE_AUDIO:
687 avci->initial_sample_rate = frame->sample_rate ? frame->sample_rate :
688 avctx->sample_rate;
689 avci->initial_channels = frame->channels;
690 avci->initial_channel_layout = frame->channel_layout;
691 break;
692 }
693 }
694
695 if (avctx->frame_number > 1) {
696 changed = avci->initial_format != frame->format;
697
698 switch(avctx->codec_type) {
699 case AVMEDIA_TYPE_VIDEO:
700 changed |= avci->initial_width != frame->width ||
701 avci->initial_height != frame->height;
702 break;
703 case AVMEDIA_TYPE_AUDIO:
704 changed |= avci->initial_sample_rate != frame->sample_rate ||
705 avci->initial_sample_rate != avctx->sample_rate ||
706 avci->initial_channels != frame->channels ||
707 avci->initial_channel_layout != frame->channel_layout;
708 break;
709 }
710
711 if (changed) {
712 avci->changed_frames_dropped++;
713 av_log(avctx, AV_LOG_INFO, "dropped changed frame #%d pts %"PRId64
714 " drop count: %d \n",
715 avctx->frame_number, frame->pts,
716 avci->changed_frames_dropped);
717 av_frame_unref(frame);
718 return AVERROR_INPUT_CHANGED;
719 }
720 }
721 }
722 return 0;
723 }
724
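/* Implementation of the deprecated avcodec_decode_video2()/audio4() API on
* top of avcodec_send_packet()/avcodec_receive_frame(). Returns the number
* of bytes consumed from the packet or a negative error code. */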
725 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
726 int *got_frame, const AVPacket *pkt)
727 {
728 AVCodecInternal *avci = avctx->internal;
729 int ret = 0;
730
731 av_assert0(avci->compat_decode_consumed == 0);
732
733 if (avci->draining_done && pkt && pkt->size != 0) {
734 av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
735 avcodec_flush_buffers(avctx);
736 }
737
738 *got_frame = 0;
739
740 if (avci->compat_decode_partial_size > 0 &&
741 avci->compat_decode_partial_size != pkt->size) {
742 av_log(avctx, AV_LOG_ERROR,
743 "Got unexpected packet size after a partial decode\n");
744 ret = AVERROR(EINVAL);
745 goto finish;
746 }
747
748 if (!avci->compat_decode_partial_size) {
749 ret = avcodec_send_packet(avctx, pkt);
750 if (ret == AVERROR_EOF)
751 ret = 0;
752 else if (ret == AVERROR(EAGAIN)) {
753 /* we fully drain all the output in each decode call, so this should not
754 * ever happen */
755 ret = AVERROR_BUG;
756 goto finish;
757 } else if (ret < 0)
758 goto finish;
759 }
760
761 while (ret >= 0) {
762 ret = avcodec_receive_frame(avctx, frame);
763 if (ret < 0) {
764 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
765 ret = 0;
766 goto finish;
767 }
768
769 if (frame != avci->compat_decode_frame) {
770 if (!avctx->refcounted_frames) {
771 ret = unrefcount_frame(avci, frame);
772 if (ret < 0)
773 goto finish;
774 }
775
776 *got_frame = 1;
777 frame = avci->compat_decode_frame;
778 } else {
779 if (!avci->compat_decode_warned) {
780 av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
781 "API cannot return all the frames for this decoder. "
782 "Some frames will be dropped. Update your code to the "
783 "new decoding API to fix this.\n");
784 avci->compat_decode_warned = 1;
785 }
786 }
787
788 if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
789 break;
790 }
791
792 finish:
793 if (ret == 0) {
794 /* if there are any bsfs then assume full packet is always consumed */
795 if (avctx->codec->bsfs)
796 ret = pkt->size;
797 else
798 ret = FFMIN(avci->compat_decode_consumed, pkt->size);
799 }
800 avci->compat_decode_consumed = 0;
801 avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
802
803 return ret;
804 }
805
806 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
807 int *got_picture_ptr,
808 const AVPacket *avpkt)
809 {
810 return compat_decode(avctx, picture, got_picture_ptr, avpkt);
811 }
812
813 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
814 AVFrame *frame,
815 int *got_frame_ptr,
816 const AVPacket *avpkt)
817 {
818 return compat_decode(avctx, frame, got_frame_ptr, avpkt);
819 }
820
821 static void get_subtitle_defaults(AVSubtitle *sub)
822 {
823 memset(sub, 0, sizeof(*sub));
824 sub->pts = AV_NOPTS_VALUE;
825 }
826
827 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
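/* If sub_charenc_mode is FF_SUB_CHARENC_MODE_PRE_DECODER, recode the
* subtitle packet payload to UTF-8 with iconv before decoding. */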
828 static int recode_subtitle(AVCodecContext *avctx,
829 AVPacket *outpkt, const AVPacket *inpkt)
830 {
831 #if CONFIG_ICONV
832 iconv_t cd = (iconv_t)-1;
833 int ret = 0;
834 char *inb, *outb;
835 size_t inl, outl;
836 AVPacket tmp;
837 #endif
838
839 if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
840 return 0;
841
842 #if CONFIG_ICONV
843 cd = iconv_open("UTF-8", avctx->sub_charenc);
844 av_assert0(cd != (iconv_t)-1);
845
846 inb = inpkt->data;
847 inl = inpkt->size;
848
849 if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
850 av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
851 ret = AVERROR(ENOMEM);
852 goto end;
853 }
854
855 ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
856 if (ret < 0)
857 goto end;
858 outpkt->buf = tmp.buf;
859 outpkt->data = tmp.data;
860 outpkt->size = tmp.size;
861 outb = outpkt->data;
862 outl = outpkt->size;
863
864 if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
865 iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
866 outl >= outpkt->size || inl != 0) {
867 ret = FFMIN(AVERROR(errno), -1);
868 av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
869 "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
870 av_packet_unref(&tmp);
871 goto end;
872 }
873 outpkt->size -= outl;
874 memset(outpkt->data + outpkt->size, 0, outl);
875
876 end:
877 if (cd != (iconv_t)-1)
878 iconv_close(cd);
879 return ret;
880 #else
881 av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv\n");
882 return AVERROR(EINVAL);
883 #endif
884 }
885
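/* Return 1 if the string is valid UTF-8 (no overlong sequences, surrogates
* or codepoints beyond U+10FFFF), 0 otherwise. */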
886 static int utf8_check(const uint8_t *str)
887 {
888 const uint8_t *byte;
889 uint32_t codepoint, min;
890
891 while (*str) {
892 byte = str;
893 GET_UTF8(codepoint, *(byte++), return 0;);
894 min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
895 1 << (5 * (byte - str) - 4);
896 if (codepoint < min || codepoint >= 0x110000 ||
897 codepoint == 0xFFFE /* BOM */ ||
898 codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
899 return 0;
900 str = byte;
901 }
902 return 1;
903 }
904
905 #if FF_API_ASS_TIMING
906 static void insert_ts(AVBPrint *buf, int ts)
907 {
908 if (ts == -1) {
909 av_bprintf(buf, "9:59:59.99,");
910 } else {
911 int h, m, s;
912
913 h = ts/360000; ts -= 360000*h;
914 m = ts/ 6000; ts -= 6000*m;
915 s = ts/ 100; ts -= 100*s;
916 av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
917 }
918 }
919
920 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
921 {
922 int i;
923 AVBPrint buf;
924
925 av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
926
927 for (i = 0; i < sub->num_rects; i++) {
928 char *final_dialog;
929 const char *dialog;
930 AVSubtitleRect *rect = sub->rects[i];
931 int ts_start, ts_duration = -1;
932 long int layer;
933
934 if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
935 continue;
936
937 av_bprint_clear(&buf);
938
939 /* skip ReadOrder */
940 dialog = strchr(rect->ass, ',');
941 if (!dialog)
942 continue;
943 dialog++;
944
945 /* extract Layer or Marked */
946 layer = strtol(dialog, (char**)&dialog, 10);
947 if (*dialog != ',')
948 continue;
949 dialog++;
950
951 /* rescale timing to ASS time base (ms) */
952 ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
953 if (pkt->duration != -1)
954 ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
955 sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
956
957 /* construct ASS (standalone file form with timestamps) string */
958 av_bprintf(&buf, "Dialogue: %ld,", layer);
959 insert_ts(&buf, ts_start);
960 insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
961 av_bprintf(&buf, "%s\r\n", dialog);
962
963 final_dialog = av_strdup(buf.str);
964 if (!av_bprint_is_complete(&buf) || !final_dialog) {
965 av_freep(&final_dialog);
966 av_bprint_finalize(&buf, NULL);
967 return AVERROR(ENOMEM);
968 }
969 av_freep(&rect->ass);
970 rect->ass = final_dialog;
971 }
972
973 av_bprint_finalize(&buf, NULL);
974 return 0;
975 }
976 #endif
977
978 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
979 int *got_sub_ptr,
980 AVPacket *avpkt)
981 {
982 int i, ret = 0;
983
984 if (!avpkt->data && avpkt->size) {
985 av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
986 return AVERROR(EINVAL);
987 }
988 if (!avctx->codec)
989 return AVERROR(EINVAL);
990 if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
991 av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
992 return AVERROR(EINVAL);
993 }
994
995 *got_sub_ptr = 0;
996 get_subtitle_defaults(sub);
997
998 if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
999 AVPacket pkt_recoded = *avpkt;
1000
1001 ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1002 if (ret < 0) {
1003 *got_sub_ptr = 0;
1004 } else {
1005 ret = extract_packet_props(avctx->internal, &pkt_recoded);
1006 if (ret < 0)
1007 return ret;
1008
1009 if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1010 sub->pts = av_rescale_q(avpkt->pts,
1011 avctx->pkt_timebase, AV_TIME_BASE_Q);
1012 ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1013 av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1014 !!*got_sub_ptr >= !!sub->num_rects);
1015
1016 #if FF_API_ASS_TIMING
1017 if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1018 && *got_sub_ptr && sub->num_rects) {
1019 const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1020 : avctx->time_base;
1021 int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1022 if (err < 0)
1023 ret = err;
1024 }
1025 #endif
1026
1027 if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1028 avctx->pkt_timebase.num) {
1029 AVRational ms = { 1, 1000 };
1030 sub->end_display_time = av_rescale_q(avpkt->duration,
1031 avctx->pkt_timebase, ms);
1032 }
1033
1034 if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1035 sub->format = 0;
1036 else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1037 sub->format = 1;
1038
1039 for (i = 0; i < sub->num_rects; i++) {
1040 if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1041 sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1042 av_log(avctx, AV_LOG_ERROR,
1043 "Invalid UTF-8 in decoded subtitles text; "
1044 "maybe missing -sub_charenc option\n");
1045 avsubtitle_free(sub);
1046 ret = AVERROR_INVALIDDATA;
1047 break;
1048 }
1049 }
1050
1051 if (avpkt->data != pkt_recoded.data) { // did we recode?
1052 /* prevent from destroying side data from original packet */
1053 pkt_recoded.side_data = NULL;
1054 pkt_recoded.side_data_elems = 0;
1055
1056 av_packet_unref(&pkt_recoded);
1057 }
1058 }
1059
1060 if (*got_sub_ptr)
1061 avctx->frame_number++;
1062 }
1063
1064 return ret;
1065 }
1066
1067 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1068 const enum AVPixelFormat *fmt)
1069 {
1070 const AVPixFmtDescriptor *desc;
1071 const AVCodecHWConfig *config;
1072 int i, n;
1073
1074 // If a device was supplied when the codec was opened, assume that the
1075 // user wants to use it.
1076 if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1077 AVHWDeviceContext *device_ctx =
1078 (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1079 for (i = 0;; i++) {
1080 config = &avctx->codec->hw_configs[i]->public;
1081 if (!config)
1082 break;
1083 if (!(config->methods &
1084 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1085 continue;
1086 if (device_ctx->type != config->device_type)
1087 continue;
1088 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1089 if (config->pix_fmt == fmt[n])
1090 return fmt[n];
1091 }
1092 }
1093 }
1094 // No device or other setup, so we have to choose from things which
1095 // don't require any other external information.
1096
1097 // If the last element of the list is a software format, choose it
1098 // (this should be best software format if any exist).
1099 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1100 desc = av_pix_fmt_desc_get(fmt[n - 1]);
1101 if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1102 return fmt[n - 1];
1103
1104 // Finally, traverse the list in order and choose the first entry
1105 // with no external dependencies (if there is no hardware configuration
1106 // information available then this just picks the first entry).
1107 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1108 for (i = 0;; i++) {
1109 config = avcodec_get_hw_config(avctx->codec, i);
1110 if (!config)
1111 break;
1112 if (config->pix_fmt == fmt[n])
1113 break;
1114 }
1115 if (!config) {
1116 // No specific config available, so the decoder must be able
1117 // to handle this format without any additional setup.
1118 return fmt[n];
1119 }
1120 if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1121 // Usable with only internal setup.
1122 return fmt[n];
1123 }
1124 }
1125
1126 // Nothing is usable, give up.
1127 return AV_PIX_FMT_NONE;
1128 }
1129
1130 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1131 enum AVHWDeviceType dev_type)
1132 {
1133 AVHWDeviceContext *device_ctx;
1134 AVHWFramesContext *frames_ctx;
1135 int ret;
1136
1137 if (!avctx->hwaccel)
1138 return AVERROR(ENOSYS);
1139
1140 if (avctx->hw_frames_ctx)
1141 return 0;
1142 if (!avctx->hw_device_ctx) {
1143 av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1144 "required for hardware accelerated decoding.\n");
1145 return AVERROR(EINVAL);
1146 }
1147
1148 device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1149 if (device_ctx->type != dev_type) {
1150 av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1151 "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1152 av_hwdevice_get_type_name(device_ctx->type));
1153 return AVERROR(EINVAL);
1154 }
1155
1156 ret = avcodec_get_hw_frames_parameters(avctx,
1157 avctx->hw_device_ctx,
1158 avctx->hwaccel->pix_fmt,
1159 &avctx->hw_frames_ctx);
1160 if (ret < 0)
1161 return ret;
1162
1163 frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1164
1165
1166 if (frames_ctx->initial_pool_size) {
1167 // We guarantee 4 base work surfaces. The function above guarantees 1
1168 // (the absolute minimum), so add the missing count.
1169 frames_ctx->initial_pool_size += 3;
1170 }
1171
1172 ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1173 if (ret < 0) {
1174 av_buffer_unref(&avctx->hw_frames_ctx);
1175 return ret;
1176 }
1177
1178 return 0;
1179 }
1180
1181 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1182 AVBufferRef *device_ref,
1183 enum AVPixelFormat hw_pix_fmt,
1184 AVBufferRef **out_frames_ref)
1185 {
1186 AVBufferRef *frames_ref = NULL;
1187 const AVCodecHWConfigInternal *hw_config;
1188 const AVHWAccel *hwa;
1189 int i, ret;
1190
1191 for (i = 0;; i++) {
1192 hw_config = avctx->codec->hw_configs[i];
1193 if (!hw_config)
1194 return AVERROR(ENOENT);
1195 if (hw_config->public.pix_fmt == hw_pix_fmt)
1196 break;
1197 }
1198
1199 hwa = hw_config->hwaccel;
1200 if (!hwa || !hwa->frame_params)
1201 return AVERROR(ENOENT);
1202
1203 frames_ref = av_hwframe_ctx_alloc(device_ref);
1204 if (!frames_ref)
1205 return AVERROR(ENOMEM);
1206
1207 ret = hwa->frame_params(avctx, frames_ref);
1208 if (ret >= 0) {
1209 AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1210
1211 if (frames_ctx->initial_pool_size) {
1212 // If the user has requested that extra output surfaces be
1213 // available then add them here.
1214 if (avctx->extra_hw_frames > 0)
1215 frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1216
1217 // If frame threading is enabled then an extra surface per thread
1218 // is also required.
1219 if (avctx->active_thread_type & FF_THREAD_FRAME)
1220 frames_ctx->initial_pool_size += avctx->thread_count;
1221 }
1222
1223 *out_frames_ref = frames_ref;
1224 } else {
1225 av_buffer_unref(&frames_ref);
1226 }
1227 return ret;
1228 }
1229
1230 static int hwaccel_init(AVCodecContext *avctx,
1231 const AVCodecHWConfigInternal *hw_config)
1232 {
1233 const AVHWAccel *hwaccel;
1234 int err;
1235
1236 hwaccel = hw_config->hwaccel;
1237 if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1238 avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1239 av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1240 hwaccel->name);
1241 return AVERROR_PATCHWELCOME;
1242 }
1243
1244 if (hwaccel->priv_data_size) {
1245 avctx->internal->hwaccel_priv_data =
1246 av_mallocz(hwaccel->priv_data_size);
1247 if (!avctx->internal->hwaccel_priv_data)
1248 return AVERROR(ENOMEM);
1249 }
1250
1251 avctx->hwaccel = hwaccel;
1252 if (hwaccel->init) {
1253 err = hwaccel->init(avctx);
1254 if (err < 0) {
1255 av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1256 "hwaccel initialisation returned error.\n",
1257 av_get_pix_fmt_name(hw_config->public.pix_fmt));
1258 av_freep(&avctx->internal->hwaccel_priv_data);
1259 avctx->hwaccel = NULL;
1260 return err;
1261 }
1262 }
1263
1264 return 0;
1265 }
1266
1267 static void hwaccel_uninit(AVCodecContext *avctx)
1268 {
1269 if (avctx->hwaccel && avctx->hwaccel->uninit)
1270 avctx->hwaccel->uninit(avctx);
1271
1272 av_freep(&avctx->internal->hwaccel_priv_data);
1273
1274 avctx->hwaccel = NULL;
1275
1276 av_buffer_unref(&avctx->hw_frames_ctx);
1277 }
1278
1279 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1280 {
1281 const AVPixFmtDescriptor *desc;
1282 enum AVPixelFormat *choices;
1283 enum AVPixelFormat ret, user_choice;
1284 const AVCodecHWConfigInternal *hw_config;
1285 const AVCodecHWConfig *config;
1286 int i, n, err;
1287
1288 // Find end of list.
1289 for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1290 // Must contain at least one entry.
1291 av_assert0(n >= 1);
1292 // If a software format is available, it must be the last entry.
1293 desc = av_pix_fmt_desc_get(fmt[n - 1]);
1294 if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1295 // No software format is available.
1296 } else {
1297 avctx->sw_pix_fmt = fmt[n - 1];
1298 }
1299
1300 choices = av_malloc_array(n + 1, sizeof(*choices));
1301 if (!choices)
1302 return AV_PIX_FMT_NONE;
1303
1304 memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1305
1306 for (;;) {
1307 // Remove the previous hwaccel, if there was one.
1308 hwaccel_uninit(avctx);
1309
1310 user_choice = avctx->get_format(avctx, choices);
1311 if (user_choice == AV_PIX_FMT_NONE) {
1312 // Explicitly chose nothing, give up.
1313 ret = AV_PIX_FMT_NONE;
1314 break;
1315 }
1316
1317 desc = av_pix_fmt_desc_get(user_choice);
1318 if (!desc) {
1319 av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1320 "get_format() callback.\n");
1321 ret = AV_PIX_FMT_NONE;
1322 break;
1323 }
1324 av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1325 desc->name);
1326
1327 for (i = 0; i < n; i++) {
1328 if (choices[i] == user_choice)
1329 break;
1330 }
1331 if (i == n) {
1332 av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1333 "%s not in possible list.\n", desc->name);
1334 ret = AV_PIX_FMT_NONE;
1335 break;
1336 }
1337
1338 if (avctx->codec->hw_configs) {
1339 for (i = 0;; i++) {
1340 hw_config = avctx->codec->hw_configs[i];
1341 if (!hw_config)
1342 break;
1343 if (hw_config->public.pix_fmt == user_choice)
1344 break;
1345 }
1346 } else {
1347 hw_config = NULL;
1348 }
1349
1350 if (!hw_config) {
1351 // No config available, so no extra setup required.
1352 ret = user_choice;
1353 break;
1354 }
1355 config = &hw_config->public;
1356
1357 if (config->methods &
1358 AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1359 avctx->hw_frames_ctx) {
1360 const AVHWFramesContext *frames_ctx =
1361 (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1362 if (frames_ctx->format != user_choice) {
1363 av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1364 "does not match the format of the provided frames "
1365 "context.\n", desc->name);
1366 goto try_again;
1367 }
1368 } else if (config->methods &
1369 AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1370 avctx->hw_device_ctx) {
1371 const AVHWDeviceContext *device_ctx =
1372 (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1373 if (device_ctx->type != config->device_type) {
1374 av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1375 "does not match the type of the provided device "
1376 "context.\n", desc->name);
1377 goto try_again;
1378 }
1379 } else if (config->methods &
1380 AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1381 // Internal-only setup, no additional configuration.
1382 } else if (config->methods &
1383 AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1384 // Some ad-hoc configuration we can't see and can't check.
1385 } else {
1386 av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1387 "missing configuration.\n", desc->name);
1388 goto try_again;
1389 }
1390 if (hw_config->hwaccel) {
1391 av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1392 "initialisation.\n", desc->name);
1393 err = hwaccel_init(avctx, hw_config);
1394 if (err < 0)
1395 goto try_again;
1396 }
1397 ret = user_choice;
1398 break;
1399
1400 try_again:
1401 av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1402 "get_format() without it.\n", desc->name);
1403 for (i = 0; i < n; i++) {
1404 if (choices[i] == user_choice)
1405 break;
1406 }
1407 for (; i + 1 < n; i++)
1408 choices[i] = choices[i + 1];
1409 --n;
1410 }
1411
1412 av_freep(&choices);
1413 return ret;
1414 }
1415
1416 static void frame_pool_free(void *opaque, uint8_t *data)
1417 {
1418 FramePool *pool = (FramePool*)data;
1419 int i;
1420
1421 for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++)
1422 av_buffer_pool_uninit(&pool->pools[i]);
1423
1424 av_freep(&data);
1425 }
1426
1427 static AVBufferRef *frame_pool_alloc(void)
1428 {
1429 FramePool *pool = av_mallocz(sizeof(*pool));
1430 AVBufferRef *buf;
1431
1432 if (!pool)
1433 return NULL;
1434
1435 buf = av_buffer_create((uint8_t*)pool, sizeof(*pool),
1436 frame_pool_free, NULL, 0);
1437 if (!buf) {
1438 av_freep(&pool);
1439 return NULL;
1440 }
1441
1442 return buf;
1443 }
1444
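/* (Re)create the internal buffer pools whenever the frame parameters
* (format, dimensions, or audio plane/channel/sample count) no longer match
* the existing pool. */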
1445 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1446 {
1447 FramePool *pool = avctx->internal->pool ?
1448 (FramePool*)avctx->internal->pool->data : NULL;
1449 AVBufferRef *pool_buf;
1450 int i, ret, ch, planes;
1451
1452 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1453 int planar = av_sample_fmt_is_planar(frame->format);
1454 ch = frame->channels;
1455 planes = planar ? ch : 1;
1456 }
1457
1458 if (pool && pool->format == frame->format) {
1459 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
1460 pool->width == frame->width && pool->height == frame->height)
1461 return 0;
1462 if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && pool->planes == planes &&
1463 pool->channels == ch && frame->nb_samples == pool->samples)
1464 return 0;
1465 }
1466
1467 pool_buf = frame_pool_alloc();
1468 if (!pool_buf)
1469 return AVERROR(ENOMEM);
1470 pool = (FramePool*)pool_buf->data;
1471
1472 switch (avctx->codec_type) {
1473 case AVMEDIA_TYPE_VIDEO: {
1474 uint8_t *data[4];
1475 int linesize[4];
1476 int size[4] = { 0 };
1477 int w = frame->width;
1478 int h = frame->height;
1479 int tmpsize, unaligned;
1480
1481 avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1482
1483 do {
1484 // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1485 // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1486 ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1487 if (ret < 0)
1488 goto fail;
1489 // increase alignment of w for next try (rhs gives the lowest bit set in w)
1490 w += w & ~(w - 1);
1491
1492 unaligned = 0;
1493 for (i = 0; i < 4; i++)
1494 unaligned |= linesize[i] % pool->stride_align[i];
1495 } while (unaligned);
1496
1497 tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1498 NULL, linesize);
1499 if (tmpsize < 0) {
1500 ret = tmpsize;
1501 goto fail;
1502 }
1503
1504 for (i = 0; i < 3 && data[i + 1]; i++)
1505 size[i] = data[i + 1] - data[i];
1506 size[i] = tmpsize - (data[i] - data[0]);
1507
1508 for (i = 0; i < 4; i++) {
1509 pool->linesize[i] = linesize[i];
1510 if (size[i]) {
1511 pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1512 CONFIG_MEMORY_POISONING ?
1513 NULL :
1514 av_buffer_allocz);
1515 if (!pool->pools[i]) {
1516 ret = AVERROR(ENOMEM);
1517 goto fail;
1518 }
1519 }
1520 }
1521 pool->format = frame->format;
1522 pool->width = frame->width;
1523 pool->height = frame->height;
1524
1525 break;
1526 }
1527 case AVMEDIA_TYPE_AUDIO: {
1528 ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1529 frame->nb_samples, frame->format, 0);
1530 if (ret < 0)
1531 goto fail;
1532
1533 pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1534 if (!pool->pools[0]) {
1535 ret = AVERROR(ENOMEM);
1536 goto fail;
1537 }
1538
1539 pool->format = frame->format;
1540 pool->planes = planes;
1541 pool->channels = ch;
1542 pool->samples = frame->nb_samples;
1543 break;
1544 }
1545 default: av_assert0(0);
1546 }
1547
1548 av_buffer_unref(&avctx->internal->pool);
1549 avctx->internal->pool = pool_buf;
1550
1551 return 0;
1552 fail:
1553 av_buffer_unref(&pool_buf);
1554 return ret;
1555 }
1556
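/* Allocate audio buffers for the frame from the internal pool, including
* extended_data/extended_buf when there are more planes than
* AV_NUM_DATA_POINTERS. */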
1557 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1558 {
1559 FramePool *pool = (FramePool*)avctx->internal->pool->data;
1560 int planes = pool->planes;
1561 int i;
1562
1563 frame->linesize[0] = pool->linesize[0];
1564
1565 if (planes > AV_NUM_DATA_POINTERS) {
1566 frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1567 frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1568 frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1569 sizeof(*frame->extended_buf));
1570 if (!frame->extended_data || !frame->extended_buf) {
1571 av_freep(&frame->extended_data);
1572 av_freep(&frame->extended_buf);
1573 return AVERROR(ENOMEM);
1574 }
1575 } else {
1576 frame->extended_data = frame->data;
1577 av_assert0(frame->nb_extended_buf == 0);
1578 }
1579
1580 for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1581 frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1582 if (!frame->buf[i])
1583 goto fail;
1584 frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1585 }
1586 for (i = 0; i < frame->nb_extended_buf; i++) {
1587 frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1588 if (!frame->extended_buf[i])
1589 goto fail;
1590 frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1591 }
1592
1593 if (avctx->debug & FF_DEBUG_BUFFERS)
1594 av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p\n", frame);
1595
1596 return 0;
1597 fail:
1598 av_frame_unref(frame);
1599 return AVERROR(ENOMEM);
1600 }
1601
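/* Allocate the video plane buffers for the picture from the internal pools
* and fill in the data pointers and linesizes. */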
1602 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1603 {
1604 FramePool *pool = (FramePool*)s->internal->pool->data;
1605 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1606 int i;
1607
1608 if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1609 av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1610 return -1;
1611 }
1612
1613 if (!desc) {
1614 av_log(s, AV_LOG_ERROR,
1615 "Unable to get pixel format descriptor for format %s\n",
1616 av_get_pix_fmt_name(pic->format));
1617 return AVERROR(EINVAL);
1618 }
1619
1620 memset(pic->data, 0, sizeof(pic->data));
1621 pic->extended_data = pic->data;
1622
1623 for (i = 0; i < 4 && pool->pools[i]; i++) {
1624 pic->linesize[i] = pool->linesize[i];
1625
1626 pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1627 if (!pic->buf[i])
1628 goto fail;
1629
1630 pic->data[i] = pic->buf[i]->data;
1631 }
1632 for (; i < AV_NUM_DATA_POINTERS; i++) {
1633 pic->data[i] = NULL;
1634 pic->linesize[i] = 0;
1635 }
1636 if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1637 ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1638 avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1639
1640 if (s->debug & FF_DEBUG_BUFFERS)
1641 av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1642
1643 return 0;
1644 fail:
1645 av_frame_unref(pic);
1646 return AVERROR(ENOMEM);
1647 }
1648
1649 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1650 {
1651 int ret;
1652
1653 if (avctx->hw_frames_ctx) {
1654 ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1655 frame->width = avctx->coded_width;
1656 frame->height = avctx->coded_height;
1657 return ret;
1658 }
1659
1660 if ((ret = update_frame_pool(avctx, frame)) < 0)
1661 return ret;
1662
1663 switch (avctx->codec_type) {
1664 case AVMEDIA_TYPE_VIDEO:
1665 return video_get_buffer(avctx, frame);
1666 case AVMEDIA_TYPE_AUDIO:
1667 return audio_get_buffer(avctx, frame);
1668 default:
1669 return -1;
1670 }
1671 }
1672
1673 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1674 {
1675 int size;
1676 const uint8_t *side_metadata;
1677
1678 AVDictionary **frame_md = &frame->metadata;
1679
1680 side_metadata = av_packet_get_side_data(avpkt,
1681 AV_PKT_DATA_STRINGS_METADATA, &size);
1682 return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1683 }
1684
1685 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1686 {
1687 const AVPacket *pkt = avctx->internal->last_pkt_props;
1688 int i;
1689 static const struct {
1690 enum AVPacketSideDataType packet;
1691 enum AVFrameSideDataType frame;
1692 } sd[] = {
1693 { AV_PKT_DATA_REPLAYGAIN , AV_FRAME_DATA_REPLAYGAIN },
1694 { AV_PKT_DATA_DISPLAYMATRIX, AV_FRAME_DATA_DISPLAYMATRIX },
1695 { AV_PKT_DATA_SPHERICAL, AV_FRAME_DATA_SPHERICAL },
1696 { AV_PKT_DATA_STEREO3D, AV_FRAME_DATA_STEREO3D },
1697 { AV_PKT_DATA_AUDIO_SERVICE_TYPE, AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1698 { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1699 { AV_PKT_DATA_CONTENT_LIGHT_LEVEL, AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1700 { AV_PKT_DATA_A53_CC, AV_FRAME_DATA_A53_CC },
1701 { AV_PKT_DATA_ICC_PROFILE, AV_FRAME_DATA_ICC_PROFILE },
1702 };
1703
1704 if (pkt) {
1705 frame->pts = pkt->pts;
1706 #if FF_API_PKT_PTS
1707 FF_DISABLE_DEPRECATION_WARNINGS
1708 frame->pkt_pts = pkt->pts;
1709 FF_ENABLE_DEPRECATION_WARNINGS
1710 #endif
1711 frame->pkt_pos = pkt->pos;
1712 frame->pkt_duration = pkt->duration;
1713 frame->pkt_size = pkt->size;
1714
1715 for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1716 int size;
1717 uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1718 if (packet_sd) {
1719 AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1720 sd[i].frame,
1721 size);
1722 if (!frame_sd)
1723 return AVERROR(ENOMEM);
1724
1725 memcpy(frame_sd->data, packet_sd, size);
1726 }
1727 }
1728 add_metadata_from_side_data(pkt, frame);
1729
1730 if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1731 frame->flags |= AV_FRAME_FLAG_DISCARD;
1732 } else {
1733 frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1734 }
1735 }
1736 frame->reordered_opaque = avctx->reordered_opaque;
1737
1738 if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1739 frame->color_primaries = avctx->color_primaries;
1740 if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1741 frame->color_trc = avctx->color_trc;
1742 if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1743 frame->colorspace = avctx->colorspace;
1744 if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1745 frame->color_range = avctx->color_range;
1746 if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1747 frame->chroma_location = avctx->chroma_sample_location;
1748
1749 switch (avctx->codec->type) {
1750 case AVMEDIA_TYPE_VIDEO:
1751 frame->format = avctx->pix_fmt;
1752 if (!frame->sample_aspect_ratio.num)
1753 frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1754
1755 if (frame->width && frame->height &&
1756 av_image_check_sar(frame->width, frame->height,
1757 frame->sample_aspect_ratio) < 0) {
1758 av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1759 frame->sample_aspect_ratio.num,
1760 frame->sample_aspect_ratio.den);
1761 frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1762 }
1763
1764 break;
1765 case AVMEDIA_TYPE_AUDIO:
1766 if (!frame->sample_rate)
1767 frame->sample_rate = avctx->sample_rate;
1768 if (frame->format < 0)
1769 frame->format = avctx->sample_fmt;
1770 if (!frame->channel_layout) {
1771 if (avctx->channel_layout) {
1772 if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1773 avctx->channels) {
1774 av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1775 "configuration.\n");
1776 return AVERROR(EINVAL);
1777 }
1778
1779 frame->channel_layout = avctx->channel_layout;
1780 } else {
1781 if (avctx->channels > FF_SANE_NB_CHANNELS) {
1782 av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1783 avctx->channels);
1784 return AVERROR(ENOSYS);
1785 }
1786 }
1787 }
1788 frame->channels = avctx->channels;
1789 break;
1790 }
1791 return 0;
1792 }
1793
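/* Sanity-check a frame returned by get_buffer2(): all used plane pointers
* must be set, unused ones are reset to NULL (with a warning if non-NULL). */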
1794 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1795 {
1796 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1797 int i;
1798 int num_planes = av_pix_fmt_count_planes(frame->format);
1799 const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1800 int flags = desc ? desc->flags : 0;
1801 if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1802 num_planes = 2;
1803 if ((flags & FF_PSEUDOPAL) && frame->data[1])
1804 num_planes = 2;
1805 for (i = 0; i < num_planes; i++) {
1806 av_assert0(frame->data[i]);
1807 }
1808 // For formats without data like hwaccel allow unused pointers to be non-NULL.
1809 for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1810 if (frame->data[i])
1811 av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1812 frame->data[i] = NULL;
1813 }
1814 }
1815 }
1816
1817 static void decode_data_free(void *opaque, uint8_t *data)
1818 {
1819 FrameDecodeData *fdd = (FrameDecodeData*)data;
1820
1821 if (fdd->post_process_opaque_free)
1822 fdd->post_process_opaque_free(fdd->post_process_opaque);
1823
1824 if (fdd->hwaccel_priv_free)
1825 fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1826
1827 av_freep(&fdd);
1828 }
1829
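/* Attach a FrameDecodeData struct to frame->private_ref; it carries the
* optional post_process() callback and hwaccel private data, and is freed
* together with the frame. */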
1830 int ff_attach_decode_data(AVFrame *frame)
1831 {
1832 AVBufferRef *fdd_buf;
1833 FrameDecodeData *fdd;
1834
1835 av_assert1(!frame->private_ref);
1836 av_buffer_unref(&frame->private_ref);
1837
1838 fdd = av_mallocz(sizeof(*fdd));
1839 if (!fdd)
1840 return AVERROR(ENOMEM);
1841
1842 fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1843 NULL, AV_BUFFER_FLAG_READONLY);
1844 if (!fdd_buf) {
1845 av_freep(&fdd);
1846 return AVERROR(ENOMEM);
1847 }
1848
1849 frame->private_ref = fdd_buf;
1850
1851 return 0;
1852 }
1853
1854 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1855 {
1856 const AVHWAccel *hwaccel = avctx->hwaccel;
1857 int override_dimensions = 1;
1858 int ret;
1859
1860 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1861 if ((unsigned)avctx->width > INT_MAX - STRIDE_ALIGN ||
1862 (ret = av_image_check_size2(FFALIGN(avctx->width, STRIDE_ALIGN), avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1863 av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1864 ret = AVERROR(EINVAL);
1865 goto fail;
1866 }
1867
1868 if (frame->width <= 0 || frame->height <= 0) {
1869 frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1870 frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1871 override_dimensions = 0;
1872 }
1873
1874 if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1875 av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1876 ret = AVERROR(EINVAL);
1877 goto fail;
1878 }
1879 } else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
1880 if (frame->nb_samples * (int64_t)avctx->channels > avctx->max_samples) {
1881 av_log(avctx, AV_LOG_ERROR, "samples per frame %d, exceeds max_samples %"PRId64"\n", frame->nb_samples, avctx->max_samples);
1882 ret = AVERROR(EINVAL);
1883 goto fail;
1884 }
1885 }
1886 ret = ff_decode_frame_props(avctx, frame);
1887 if (ret < 0)
1888 goto fail;
1889
1890 if (hwaccel) {
1891 if (hwaccel->alloc_frame) {
1892 ret = hwaccel->alloc_frame(avctx, frame);
1893 goto end;
1894 }
1895 } else
1896 avctx->sw_pix_fmt = avctx->pix_fmt;
1897
1898 ret = avctx->get_buffer2(avctx, frame, flags);
1899 if (ret < 0)
1900 goto fail;
1901
1902 validate_avframe_allocation(avctx, frame);
1903
1904 ret = ff_attach_decode_data(frame);
1905 if (ret < 0)
1906 goto fail;
1907
1908 end:
1909 if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1910 !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1911 frame->width = avctx->width;
1912 frame->height = avctx->height;
1913 }
1914
1915 fail:
1916 if (ret < 0) {
1917 av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1918 av_frame_unref(frame);
1919 }
1920
1921 return ret;
1922 }
1923
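/* Reuse the existing frame buffer when it still matches the current
* dimensions/format and is writable (or FF_REGET_BUFFER_FLAG_READONLY is
* set); otherwise allocate a new buffer and copy the old contents into it. */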
1924 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1925 {
1926 AVFrame *tmp;
1927 int ret;
1928
1929 av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1930
1931 if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1932 av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1933 frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1934 av_frame_unref(frame);
1935 }
1936
1937 if (!frame->data[0])
1938 return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1939
1940 if ((flags & FF_REGET_BUFFER_FLAG_READONLY) || av_frame_is_writable(frame))
1941 return ff_decode_frame_props(avctx, frame);
1942
1943 tmp = av_frame_alloc();
1944 if (!tmp)
1945 return AVERROR(ENOMEM);
1946
1947 av_frame_move_ref(tmp, frame);
1948
1949 ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1950 if (ret < 0) {
1951 av_frame_free(&tmp);
1952 return ret;
1953 }
1954
1955 av_frame_copy(frame, tmp);
1956 av_frame_free(&tmp);
1957
1958 return 0;
1959 }
1960
1961 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1962 {
1963 int ret = reget_buffer_internal(avctx, frame, flags);
1964 if (ret < 0)
1965 av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1966 return ret;
1967 }
1968