/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2014 Andrey Utkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
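
/* Per-stream filtering state: the configured filter graph with its buffer
 * source and sink, plus a packet and frame reused for the encoding step.
 * Streams that are remuxed without transcoding keep filter_graph == NULL. */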
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;

    AVPacket *enc_pkt;
    AVFrame *filtered_frame;
} FilteringContext;
static FilteringContext *filter_ctx;

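/* Per-stream codec state: the decoder and encoder contexts plus a frame
 * reused for decoder output. enc_ctx stays NULL for streams that are only
 * remuxed. */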
typedef struct StreamContext {
    AVCodecContext *dec_ctx;
    AVCodecContext *enc_ctx;

    AVFrame *dec_frame;
} StreamContext;
static StreamContext *stream_ctx;

static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    stream_ctx = av_calloc(ifmt_ctx->nb_streams, sizeof(*stream_ctx));
    if (!stream_ctx)
        return AVERROR(ENOMEM);

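    /* Set up a decoder context for every input stream; only audio and video
     * decoders are actually opened, the remaining streams are left for
     * remuxing. */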
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream = ifmt_ctx->streams[i];
        const AVCodec *dec = avcodec_find_decoder(stream->codecpar->codec_id);
        AVCodecContext *codec_ctx;
        if (!dec) {
            av_log(NULL, AV_LOG_ERROR, "Failed to find decoder for stream #%u\n", i);
            return AVERROR_DECODER_NOT_FOUND;
        }
        codec_ctx = avcodec_alloc_context3(dec);
        if (!codec_ctx) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate the decoder context for stream #%u\n", i);
            return AVERROR(ENOMEM);
        }
        ret = avcodec_parameters_to_context(codec_ctx, stream->codecpar);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to copy decoder parameters to input decoder context "
                   "for stream #%u\n", i);
            return ret;
        }
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
                codec_ctx->framerate = av_guess_frame_rate(ifmt_ctx, stream, NULL);
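            /* (The guessed frame rate is used later, in open_output_file(),
             * to derive the video encoder's time base.) */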
            /* Open decoder */
            ret = avcodec_open2(codec_ctx, dec, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
        stream_ctx[i].dec_ctx = codec_ctx;

        stream_ctx[i].dec_frame = av_frame_alloc();
        if (!stream_ctx[i].dec_frame)
            return AVERROR(ENOMEM);
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    const AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }

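    /* Create one output stream per input stream: audio and video streams get
     * a freshly configured encoder, everything else is copied as-is. */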
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = stream_ctx[i].dec_ctx;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* in this example, we choose transcoding to same codec */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }
            enc_ctx = avcodec_alloc_context3(encoder);
            if (!enc_ctx) {
                av_log(NULL, AV_LOG_FATAL, "Failed to allocate the encoder context\n");
                return AVERROR(ENOMEM);
            }

            /* In this example, we transcode to same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                if (encoder->pix_fmts)
                    enc_ctx->pix_fmt = encoder->pix_fmts[0];
                else
                    enc_ctx->pix_fmt = dec_ctx->pix_fmt;
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = av_inv_q(dec_ctx->framerate);
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                ret = av_channel_layout_copy(&enc_ctx->ch_layout, &dec_ctx->ch_layout);
                if (ret < 0)
                    return ret;
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

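            /* Formats that require global headers (e.g. MP4/MOV) expect codec
             * extradata in the container header rather than in each packet,
             * so tell the encoder to produce it that way. */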
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
                return ret;
            }
            ret = avcodec_parameters_from_context(out_stream->codecpar, enc_ctx);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to copy encoder parameters to output stream #%u\n", i);
                return ret;
            }

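            /* Use the encoder time base as a hint for the output stream; the
             * muxer may still adjust it in avformat_write_header(), which is
             * why packets are rescaled to the stream time base before muxing
             * below. */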
            out_stream->time_base = enc_ctx->time_base;
            stream_ctx[i].enc_ctx = enc_ctx;
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_parameters_copy(out_stream->codecpar, in_stream->codecpar);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying parameters for stream #%u failed\n", i);
                return ret;
            }
            out_stream->time_base = in_stream->time_base;
        }

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}

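/* Build a filter graph of the form
 *   buffer/abuffer (configured from the decoder) -> filter_spec -> buffersink/abuffersink
 * The sink is constrained to the formats the encoder was configured with, so
 * any needed pixel/sample format conversion is negotiated in during graph
 * configuration. */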
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    const AVFilter *buffersrc = NULL;
    const AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        char buf[64];
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (dec_ctx->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
            av_channel_layout_default(&dec_ctx->ch_layout, dec_ctx->ch_layout.nb_channels);
        av_channel_layout_describe(&dec_ctx->ch_layout, buf, sizeof(buf));
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=%s",
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                buf);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        av_channel_layout_describe(&enc_ctx->ch_layout, buf, sizeof(buf));
        ret = av_opt_set(buffersink_ctx, "ch_layouts",
                buf, AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
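    /* Note the inverted naming: "outputs" describes the buffer source's
     * output pad, which feeds the input of the parsed filter_spec graph,
     * while "inputs" describes the buffer sink's input pad, which consumes
     * its output. */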
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

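/* Allocate one FilteringContext per input stream and build a passthrough
 * ("null"/"anull") filter graph for each audio/video stream; other streams
 * keep filter_graph == NULL and are remuxed without transcoding. */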
static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx  = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph   = NULL;
        if (!(ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;

        if (ifmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], stream_ctx[i].dec_ctx,
                stream_ctx[i].enc_ctx, filter_spec);
        if (ret)
            return ret;

        filter_ctx[i].enc_pkt = av_packet_alloc();
        if (!filter_ctx[i].enc_pkt)
            return AVERROR(ENOMEM);

        filter_ctx[i].filtered_frame = av_frame_alloc();
        if (!filter_ctx[i].filtered_frame)
            return AVERROR(ENOMEM);
    }
    return 0;
}

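/* Send one filtered frame to the encoder and mux every packet the encoder
 * has ready. Passing flush != 0 sends a NULL frame, which puts the encoder
 * into draining mode at end of stream. */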
static int encode_write_frame(unsigned int stream_index, int flush)
{
    StreamContext *stream = &stream_ctx[stream_index];
    FilteringContext *filter = &filter_ctx[stream_index];
    AVFrame *filt_frame = flush ? NULL : filter->filtered_frame;
    AVPacket *enc_pkt = filter->enc_pkt;
    int ret;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    av_packet_unref(enc_pkt);

    ret = avcodec_send_frame(stream->enc_ctx, filt_frame);

    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_packet(stream->enc_ctx, enc_pkt);

        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;

        /* prepare packet for muxing */
        enc_pkt->stream_index = stream_index;
        av_packet_rescale_ts(enc_pkt,
                             stream->enc_ctx->time_base,
                             ofmt_ctx->streams[stream_index]->time_base);

        av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
        /* mux encoded frame */
        ret = av_interleaved_write_frame(ofmt_ctx, enc_pkt);
    }

    return ret;
}

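/* Push one decoded frame (or NULL to signal EOF) into the stream's filter
 * graph, then pull every frame the graph has ready and hand each one to
 * encode_write_frame(). AVERROR(EAGAIN)/AVERROR_EOF from the sink simply
 * mean there is nothing more to pull right now. */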
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    FilteringContext *filter = &filter_ctx[stream_index];
    int ret;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter->buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter->buffersink_ctx,
                filter->filtered_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            break;
        }

        filter->filtered_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(stream_index, 0);
        av_frame_unref(filter->filtered_frame);
        if (ret < 0)
            break;
    }

    return ret;
}

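/* Encoders with the AV_CODEC_CAP_DELAY capability buffer frames internally
 * and must be drained with a NULL frame at end of stream; others have
 * nothing left to emit, so flushing can be skipped. */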
static int flush_encoder(unsigned int stream_index)
{
    if (!(stream_ctx[stream_index].enc_ctx->codec->capabilities &
                AV_CODEC_CAP_DELAY))
        return 0;

    av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
    return encode_write_frame(stream_index, 1);
}

int main(int argc, char **argv)
{
    int ret;
    AVPacket *packet = NULL;
    unsigned int stream_index;
    unsigned int i;

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;
    if (!(packet = av_packet_alloc())) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, packet)) < 0)
            break;
        stream_index = packet->stream_index;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            StreamContext *stream = &stream_ctx[stream_index];

            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");

            av_packet_rescale_ts(packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 stream->dec_ctx->time_base);
            ret = avcodec_send_packet(stream->dec_ctx, packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

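            /* One input packet can yield zero or more frames: AVERROR(EAGAIN)
             * means the decoder needs more input, AVERROR_EOF means it has
             * been fully flushed. */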
            while (ret >= 0) {
                ret = avcodec_receive_frame(stream->dec_ctx, stream->dec_frame);
                if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
                    break;
                else if (ret < 0)
                    goto end;

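                /* best_effort_timestamp is the decoder's timestamp estimate
                 * for the frame (pts with fallback heuristics); use it as the
                 * presentation time for filtering and encoding. */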
                stream->dec_frame->pts = stream->dec_frame->best_effort_timestamp;
                ret = filter_encode_write_frame(stream->dec_frame, stream_index);
                if (ret < 0)
                    goto end;
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_free(&packet);
    /* guard against cleanup running before the input contexts were set up */
    for (i = 0; ifmt_ctx && stream_ctx && i < ifmt_ctx->nb_streams; i++) {
        avcodec_free_context(&stream_ctx[i].dec_ctx);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && stream_ctx[i].enc_ctx)
            avcodec_free_context(&stream_ctx[i].enc_ctx);
        if (filter_ctx && filter_ctx[i].filter_graph) {
            avfilter_graph_free(&filter_ctx[i].filter_graph);
            av_packet_free(&filter_ctx[i].enc_pkt);
            av_frame_free(&filter_ctx[i].filtered_frame);
        }

        av_frame_free(&stream_ctx[i].dec_frame);
    }
    av_free(filter_ctx);
    av_free(stream_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}