1 /*
2 * Copyright (c) 2000-2003 Fabrice Bellard
3 *
4 * This file is part of FFmpeg.
5 *
6 * FFmpeg is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
10 *
11 * FFmpeg is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with FFmpeg; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20
21 /**
22 * @file
23 * multimedia converter based on the FFmpeg libraries
24 */
25
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavcodec/version.h"
68 #include "libavformat/os_support.h"
69
70 # include "libavfilter/avfilter.h"
71 # include "libavfilter/buffersrc.h"
72 # include "libavfilter/buffersink.h"
73
74 #if HAVE_SYS_RESOURCE_H
75 #include <sys/time.h>
76 #include <sys/types.h>
77 #include <sys/resource.h>
78 #elif HAVE_GETPROCESSTIMES
79 #include <windows.h>
80 #endif
81 #if HAVE_GETPROCESSMEMORYINFO
82 #include <windows.h>
83 #include <psapi.h>
84 #endif
85 #if HAVE_SETCONSOLECTRLHANDLER
86 #include <windows.h>
87 #endif
88
89
90 #if HAVE_SYS_SELECT_H
91 #include <sys/select.h>
92 #endif
93
94 #if HAVE_TERMIOS_H
95 #include <fcntl.h>
96 #include <sys/ioctl.h>
97 #include <sys/time.h>
98 #include <termios.h>
99 #elif HAVE_KBHIT
100 #include <conio.h>
101 #endif
102
103 #include <time.h>
104
105 #include "ffmpeg.h"
106 #include "cmdutils.h"
107
108 #include "libavutil/avassert.h"
109
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* -vstats output file; opened lazily on first update_video_stats() call */
static FILE *vstats_file;

/* variable names usable in -force_key_frames expressions (NULL-terminated) */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

/* one -benchmark sample: wall clock, user CPU and system CPU, in microseconds */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
} BenchmarkTimeStamps;

static BenchmarkTimeStamps get_benchmark_time_stamps(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);

static int64_t nb_frames_dup = 0;    // frames duplicated so far (updated elsewhere in this file)
static uint64_t dup_warning = 1000;  // next duplicate count that triggers a warning
static int64_t nb_frames_drop = 0;   // frames dropped so far
static int64_t decode_error_stat[2]; // decode call counters; index semantics defined at the update site
unsigned nb_output_dumped = 0;

int want_sdp = 1;

static BenchmarkTimeStamps current_time;  // reference sample for update_benchmark() deltas
AVIOContext *progress_avio = NULL;

static uint8_t *subtitle_out;  // shared encode buffer, allocated in do_subtitle_out(), freed in ffmpeg_cleanup()

/* global registries of all input/output streams and files, owned by this file */
InputStream **input_streams = NULL;
int        nb_input_streams = 0;
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;

OutputStream **output_streams = NULL;
int         nb_output_streams = 0;
OutputFile   **output_files   = NULL;
int         nb_output_files   = 0;

FilterGraph **filtergraphs;
int nb_filtergraphs;
159
#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty;  // terminal state saved by term_init(), restored by term_exit_sigsafe()
static int restore_tty;        // nonzero once oldtty holds a valid saved state
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif

/* sub2video hack:
   Convert subtitles to video with alpha to insert them in filter graphs.
   This is a temporary solution until libavfilter gets real subtitles support.
 */
175
/* Reset the stream's sub2video canvas and (re)allocate a blank RGB32
 * buffer, sized from the decoder when it reports dimensions, otherwise
 * from the configured sub2video size.  Returns 0 or a negative AVERROR. */
static int sub2video_get_blank_frame(InputStream *ist)
{
    AVFrame *frame = ist->sub2video.frame;
    int err;

    av_frame_unref(frame);
    frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    frame->format = AV_PIX_FMT_RGB32;
    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;
    /* fully transparent canvas */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
190
/* Blit one paletted subtitle rectangle into the RGB32 sub2video canvas.
 * dst/dst_linesize describe the canvas of size w x h; rectangles that are
 * not bitmaps or fall outside the canvas are skipped with a warning. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    const uint8_t *src_row;
    const uint32_t *palette;
    uint8_t *dst_row;
    int row, col;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
        );
        return;
    }

    /* expand the PAL8 bitmap through its palette into the RGB32 canvas */
    dst_row = dst + r->y * dst_linesize + r->x * 4;
    src_row = r->data[0];
    palette = (uint32_t *)r->data[1];
    for (row = 0; row < r->h; row++) {
        uint32_t *out = (uint32_t *)dst_row;
        for (col = 0; col < r->w; col++)
            out[col] = palette[src_row[col]];
        dst_row += dst_linesize;
        src_row += r->linesize[0];
    }
}
221
/* Push the current sub2video frame (with the given pts) into every filter
 * fed by this input stream.  The frame is kept referenced so it can be
 * re-sent by later heartbeats; AVERROR_EOF from a closed input is ignored. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
                                           AV_BUFFERSRC_FLAG_PUSH);
        if (ret != AVERROR_EOF && ret < 0)
            /* fixed grammar of the original message
             * ("Error while add the frame to buffer source(%s)") */
            av_log(NULL, AV_LOG_WARNING,
                   "Error while adding the frame to buffer source (%s).\n",
                   av_err2str(ret));
    }
}
239
/* Render the given subtitle (or, with sub == NULL, a blank/extension frame)
 * onto the sub2video canvas and push it into the filtergraph.
 * heartbeat_pts is used as the start time only while initializing. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    uint8_t *dst;   /* fixed: was int8_t *, an incompatible pointer type for
                       sub2video_copy_rect(uint8_t *, ...) */
    int     dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* convert display window from AV_TIME_BASE to stream time base */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ist->sub2video.initialize ?
                    heartbeat_pts : ist->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts    = end_pts;
    ist->sub2video.initialize = 0;
}
279
/**
 * Keep sub2video streams of the same input file alive: called whenever a
 * packet is read for any stream, so filters waiting on a subtitle input
 * (e.g. overlay) keep receiving frames instead of stalling the graph.
 */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* only re-push the current frame if some filter actually failed a
           request for input since the last push */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
311
/* End-of-stream handling for a sub2video stream: extend the last subpicture
 * to the end of time, then signal EOF to every attached buffer source. */
static void sub2video_flush(InputStream *ist)
{
    int idx, err;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);

    for (idx = 0; idx < ist->nb_filters; idx++) {
        err = av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
        if (err < 0 && err != AVERROR_EOF)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
325
326 /* end of sub2video hack */
327
/* Restore the saved terminal state.  Only calls tcsetattr(), which is
 * async-signal-safe, so this may be used from sigterm_handler(). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
335
/* Public terminal-restore entry point.
 * NOTE(review): the empty av_log() call appears intended to flush av_log's
 * internal line state before the tty is reset — confirm against av_log docs. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
341
/* signal / shutdown state shared between the handlers and the main loop */
static volatile int received_sigterm = 0;       // last termination signal number received
static volatile int received_nb_signals = 0;    // total signals received so far
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;          // set at the end of ffmpeg_cleanup()
int main_return_code = 0;
static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
348
349 static void
sigterm_handler(int sig)350 sigterm_handler(int sig)
351 {
352 int ret;
353 received_sigterm = sig;
354 received_nb_signals++;
355 term_exit_sigsafe();
356 if(received_nb_signals > 3) {
357 ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
358 strlen("Received > 3 system signals, hard exiting\n"));
359 if (ret < 0) { /* Do nothing */ };
360 exit(123);
361 }
362 }
363
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * POSIX-style signal handling used by the rest of this file. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);  /* yield until ffmpeg_cleanup() sets the flag */
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
395
/* Install a signal handler.  On Linux this uses sigaction() and relies on a
 * local `struct sigaction action` being in scope at the call site (see
 * term_init()); elsewhere it falls back to plain signal(). */
#ifdef __linux__
#define SIGNAL(sig, func)               \
    do {                                \
        action.sa_handler = func;       \
        sigaction(sig, &action, NULL);  \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
406
/* Install signal handlers and, when stdin interaction is enabled, switch the
 * terminal into a raw-ish mode so read_key() can see single key presses. */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR) */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* remember the current state so term_exit() can restore it */
            oldtty = tty;
            restore_tty = 1;

            /* no input translation, no echo, non-canonical reads: deliver
               each byte immediately (VMIN=1, VTIME=0) */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                             |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
454
/* read a key without blocking; returns the key byte, or a non-positive
   value (0/-1) when nothing is available */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* zero-timeout select(): probe stdin without blocking */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* lazily detect whether stdin is a console or a pipe (GUI parent) */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
506
decode_interrupt_cb(void * ctx)507 static int decode_interrupt_cb(void *ctx)
508 {
509 return received_nb_signals > atomic_load(&transcode_init_done);
510 }
511
512 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
513
/* Free all global state at program exit: filtergraphs, output files and
 * streams, input files and streams, queued frames/packets and option
 * dictionaries.  `ret` is the exit code, used only for the final message. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* drain and free frames still queued for this filter input */
            if (ifilter->frame_queue) {
                AVFrame *frame;
                while (av_fifo_read(ifilter->frame_queue, &frame, 1) >= 0)
                    av_frame_free(&frame);
                av_fifo_freep2(&ifilter->frame_queue);
            }
            av_freep(&ifilter->displaymatrix);
            /* likewise for subtitles queued by the sub2video hack */
            if (ist->sub2video.sub_queue) {
                AVSubtitle sub;
                while (av_fifo_read(ist->sub2video.sub_queue, &sub, 1) >= 0)
                    avsubtitle_free(&sub);
                av_fifo_freep2(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_channel_layout_uninit(&ofilter->ch_layout);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++)
        of_close(&output_files[i]);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_packet_free(&ost->pkt);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        /* packets that never reached the muxer */
        if (ost->muxing_queue) {
            AVPacket *pkt;
            while (av_fifo_read(ost->muxing_queue, &pkt, 1) >= 0)
                av_packet_free(&pkt);
            av_fifo_freep2(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_packet_free(&input_files[i]->pkt);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);
    av_freep(&filter_nbthreads);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1;  /* releases the Windows CtrlHandler spin-wait */
}
657
/* Remove from *a every option whose key also appears in b (case-sensitive
 * match); used to strip options already consumed elsewhere. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
    const AVDictionaryEntry *entry = NULL;

    while ((entry = av_dict_get(b, "", entry, AV_DICT_IGNORE_SUFFIX)))
        av_dict_set(a, entry->key, NULL, AV_DICT_MATCH_CASE);
}
666
/* Abort with a fatal error if any option in m was not consumed. */
void assert_avoptions(AVDictionary *m)
{
    const AVDictionaryEntry *unused = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX);

    if (unused) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", unused->key);
        exit_program(1);
    }
}
675
/* Terminate after an experimental codec was selected without permission;
 * parameters are unused here but kept for the call sites' convenience. */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
680
update_benchmark(const char * fmt,...)681 static void update_benchmark(const char *fmt, ...)
682 {
683 if (do_benchmark_all) {
684 BenchmarkTimeStamps t = get_benchmark_time_stamps();
685 va_list va;
686 char buf[1024];
687
688 if (fmt) {
689 va_start(va, fmt);
690 vsnprintf(buf, sizeof(buf), fmt, va);
691 va_end(va);
692 av_log(NULL, AV_LOG_INFO,
693 "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
694 t.user_usec - current_time.user_usec,
695 t.sys_usec - current_time.sys_usec,
696 t.real_usec - current_time.real_usec, buf);
697 }
698 current_time = t;
699 }
700 }
701
/* Mark the encoder for this stream as finished; with -shortest, also clamp
 * the owning file's recording time to this stream's end timestamp. */
static void close_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    AVRational tb  = ost->stream_copy ? ost->mux_timebase
                                      : ost->enc_ctx->time_base;

    ost->finished |= ENCODER_FINISHED;

    if (of->shortest) {
        int64_t end_us = av_rescale_q(ost->sync_opts - ost->first_pts,
                                      tb, AV_TIME_BASE_Q);
        of->recording_time = FFMIN(of->recording_time, end_us);
    }
}
713
714 /*
715 * Send a single packet to the output, applying any bitstream filters
716 * associated with the output stream. This may result in any number
717 * of packets actually being written, depending on what bitstream
718 * filters are applied. The supplied packet is consumed and will be
719 * blank (as if newly-allocated) when this function returns.
720 *
721 * If eof is set, instead indicate EOF to all bitstream filters and
722 * therefore flush any delayed packets to the output. A blank packet
723 * must be supplied in this case.
724 */
static void output_packet(OutputFile *of, AVPacket *pkt,
                          OutputStream *ost, int eof)
{
    int err = 0;

    if (!ost->bsf_ctx) {
        /* no bitstream filter: write the packet directly (EOF is a no-op) */
        if (!eof)
            of_write_packet(of, pkt, ost, 0);
    } else {
        /* feed the filter (NULL signals EOF) and drain everything it emits */
        err = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (err >= 0) {
            while ((err = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
                of_write_packet(of, pkt, ost, 0);
            if (err == AVERROR(EAGAIN))
                err = 0;
        }
    }

    /* AVERROR_EOF from the drain loop is the normal flush termination */
    if (err < 0 && err != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if (exit_on_error)
            exit_program(1);
    }
}
750
/* Return 1 while the stream is still inside the output file's -t window;
 * once the window is exhausted, close the stream and return 0. */
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    if (of->recording_time == INT64_MAX)
        return 1;
    if (av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base,
                      of->recording_time, AV_TIME_BASE_Q) < 0)
        return 1;

    close_output_stream(ost);
    return 0;
}
763
/* Rescale frame->pts from the filter output time base into the encoder time
 * base (shifted by the output file's start time), and return the same value
 * as a double carrying extra fractional precision for the fps logic.
 * Returns AV_NOPTS_VALUE if frame/pts/encoder/filtergraph is unavailable. */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
                                             AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        /* temporarily scale up the denominator so the rescale keeps up to
           16 extra fractional bits, divided back out below */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               frame ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
807
808 static int init_output_stream(OutputStream *ost, AVFrame *frame,
809 char *error, int error_len);
810
/* Initialize the output stream exactly once; on failure, log the error and
 * (when fatal is nonzero) terminate the program. */
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
                                      unsigned int fatal)
{
    char error[1024] = {0};
    int ret;

    if (ost->initialized)
        return 0;

    ret = init_output_stream(ost, frame, error, sizeof(error));
    if (ret >= 0)
        return ret;

    av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
           ost->file_index, ost->index, error);

    if (fatal)
        exit_program(1);

    return ret;
}
831
psnr(double d)832 static double psnr(double d)
833 {
834 return -10.0 * log10(d);
835 }
836
/* Pull quality/error side data off an encoded video packet into the output
 * stream, and append a stats line to the -vstats file when requested. */
static void update_video_stats(OutputStream *ost, const AVPacket *pkt, int write_vstats)
{
    const uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                                NULL);
    AVCodecContext *enc = ost->enc_ctx;
    int64_t frame_number;
    double ti1, bitrate, avg_bitrate;

    /* side data layout as read here: u32 quality, byte 4 = picture type,
       byte 5 = error count, then 64-bit little-endian error values at
       offset 8 (per AV_PKT_DATA_QUALITY_STATS) */
    ost->quality = sd ? AV_RL32(sd) : -1;
    ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

    for (int i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
        if (sd && i < sd[5])
            ost->error[i] = AV_RL64(sd + 8 + 8*i);
        else
            ost->error[i] = -1;   /* no data for this plane */
    }

    if (!write_vstats)
        return;

    /* this is executed just the first time update_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    frame_number = ost->packets_encoded;
    /* version 2 of the format additionally records output file/stream index */
    if (vstats_version <= 1) {
        fprintf(vstats_file, "frame= %5"PRId64" q= %2.1f ", frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    } else {
        fprintf(vstats_file, "out= %2d st= %2d frame= %5"PRId64" q= %2.1f ", ost->file_index, ost->index, frame_number,
                ost->quality / (float)FF_QP2LAMBDA);
    }

    if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
        fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

    fprintf(vstats_file,"f_size= %6d ", pkt->size);
    /* compute pts value */
    ti1 = pkt->dts * av_q2d(ost->mux_timebase);
    if (ti1 < 0.01)
        ti1 = 0.01;   /* clamp to avoid division blow-up below */

    bitrate = (pkt->size * 8) / av_q2d(enc->time_base) / 1000.0;
    avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
    fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
            (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
    fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
}
891
/* Submit one frame (or NULL to flush) to the encoder and write out every
 * packet it produces.  Returns 0 when the encoder wants more input,
 * AVERROR_EOF once fully flushed, or another negative error code. */
static int encode_frame(OutputFile *of, OutputStream *ost, AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    const char *type_desc = av_get_media_type_string(enc->codec_type);
    const char *action = frame ? "encode" : "flush";
    int ret;

    if (frame) {
        ost->frames_encoded++;

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:%s "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   type_desc,
                   av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }
    }

    update_benchmark(NULL);  /* reset the benchmark reference timestamps */

    ret = avcodec_send_frame(enc, frame);
    /* AVERROR_EOF while flushing just means the encoder was already flushed */
    if (ret < 0 && !(ret == AVERROR_EOF && !frame)) {
        av_log(NULL, AV_LOG_ERROR, "Error submitting %s frame to the encoder\n",
               type_desc);
        return ret;
    }

    while (1) {
        ret = avcodec_receive_packet(enc, pkt);
        update_benchmark("%s_%s %d.%d", action, type_desc,
                         ost->file_index, ost->index);

        /* if two pass, output log on success and EOF */
        if ((ret >= 0 || ret == AVERROR_EOF) && ost->logfile && enc->stats_out)
            fprintf(ost->logfile, "%s", enc->stats_out);

        if (ret == AVERROR(EAGAIN)) {
            av_assert0(frame); // should never happen during flushing
            return 0;
        } else if (ret == AVERROR_EOF) {
            /* EOF also flushes packets delayed inside the bitstream filters */
            output_packet(of, pkt, ost, 1);
            return ret;
        } else if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "%s encoding failed\n", type_desc);
            return ret;
        }

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            /* NOTE(review): timestamps are now in ost->mux_timebase, but this
               trace still formats them with enc->time_base — confirm intended */
            av_log(NULL, AV_LOG_INFO, "encoder -> type:%s "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s "
                   "duration:%s duration_time:%s\n",
                   type_desc,
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base),
                   av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &enc->time_base));
        }

        if (enc->codec_type == AVMEDIA_TYPE_VIDEO)
            update_video_stats(ost, pkt, !!vstats_filename);

        ost->packets_encoded++;

        output_packet(of, pkt, ost, 0);
    }

    av_assert0(0);  /* unreachable: the loop only exits via return */
}
973
/* Encode one audio frame, keeping the stream's sample counter in sync. */
static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    adjust_frame_pts_to_encoder_tb(of, ost, frame);

    if (!check_recording_time(ost))
        return;

    /* without a usable pts (or with forced audio sync), continue from the
       running sample count; then advance it past this frame */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts        = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;

    if (encode_frame(of, ost, frame) < 0)
        exit_program(1);
}
993
/* Encode one AVSubtitle and send the resulting packet(s) to the muxer.
 * DVB subtitles are encoded twice: a first packet draws the rectangles and
 * a second (empty) packet clears them at end_display_time. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared encode buffer (freed in ffmpeg_cleanup) */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* the second (clear) packet is encoded with zero rectangles */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;  /* restore for the caller */
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_packet_unref(pkt);
        pkt->data = subtitle_out;
        pkt->size = subtitle_out_size;
        pkt->pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1076
/* May modify/reset next_picture */
/*
 * Emit zero or more encoded video frames for one filtered input frame,
 * applying the stream's vsync policy: depending on the drift between
 * the frame's timestamp (in encoder timebase units) and the encoder
 * clock (ost->sync_opts) the frame may be dropped, encoded once, or
 * duplicated — possibly repeating the PREVIOUS frame first.  Also
 * evaluates all forced-keyframe mechanisms.  next_picture == NULL
 * signals the end-of-stream flush.
 */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    int ret;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int64_t nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    double sync_ipts = AV_NOPTS_VALUE;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* nominal frame duration, in encoder timebase units */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with no filtering configured at all, prefer the decoded frame's
     * own packet duration when it yields a usable value */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing
        /* estimate the final duplication count from the median of the
         * three most recent nb0 values */
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta  = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        /* the frame starts in the past but still overlaps the current
         * slot: clip it forward instead of dropping or duplicating */
        if (delta0 < 0 &&
            delta > 0 &&
            ost->vsync_method != VSYNC_PASSTHROUGH &&
            ost->vsync_method != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (ost->vsync_method) {
        case VSYNC_VSCFR:
            /* VSCFR = CFR, except initial drift is absorbed rather than
             * padded with duplicates */
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = llrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /*
     * For video, number of frames in == number of packets out.
     * But there may be reordering, so we can't throw away frames on encoder
     * flush, we need to limit them here, before they go into encoder.
     */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* keep a short history of duplication counts for the flush estimate */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %"PRId64" from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%"PRId64" frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %"PRId64" dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %"PRIu64" frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;
    ost->dropped_keyframe = ost->last_dropped && next_picture && next_picture->key_frame;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame->buf[0]) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        /* frame time in seconds relative to the forced-kf reference pts */
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            /* explicit -force_key_frames timestamp list */
            ost->forced_kf_index++;
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            /* -force_key_frames expr:... — evaluate the expression */
            double res;
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
                               ost->forced_keyframes_expr_const_values, NULL);
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    ost->forced_keyframes_expr_const_values[FKF_N],
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                    ost->forced_keyframes_expr_const_values[FKF_T],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    res);
            if (res) {
                forced_keyframe = 1;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                    ost->forced_keyframes_expr_const_values[FKF_N];
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                    ost->forced_keyframes_expr_const_values[FKF_T];
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
            }

            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            /* -force_key_frames source: mirror the input's keyframes */
            forced_keyframe = 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source_no_drop", 14)
                   && !i) {
            /* like "source", but also re-force a keyframe that was dropped */
            forced_keyframe = (in_picture->key_frame == 1) || ost->dropped_keyframe;
            ost->dropped_keyframe = 0;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        ret = encode_frame(of, ost, in_picture);
        if (ret < 0)
            exit_program(1);

        ost->sync_opts++;
        ost->frame_number++;
    }

    /* remember this frame in case it must be duplicated on the next call */
    av_frame_unref(ost->last_frame);
    if (next_picture)
        av_frame_move_ref(ost->last_frame, next_picture);
}
1293
/*
 * Mark an output stream as completely finished (both encoder and muxer).
 * With -shortest, also clamp the owning file's recording time so the
 * other streams stop at this stream's end.
 */
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *file = output_files[ost->file_index];
    AVRational  tb   = ost->stream_copy ? ost->mux_timebase
                                        : ost->enc_ctx->time_base;
    int64_t stream_end;

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (!file->shortest)
        return;

    stream_end = av_rescale_q(ost->sync_opts - ost->first_pts, tb, AV_TIME_BASE_Q);
    if (stream_end < file->recording_time)
        file->recording_time = stream_end;
}
1306
1307 /**
1308 * Get and encode new output from any of the filtergraphs, without causing
1309 * activity.
1310 *
1311 * @return 0 for success, <0 for severe errors
1312 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams without a (configured) filtergraph */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        filtered_frame = ost->filtered_frame;

        while (1) {
            /* NO_REQUEST: only drain frames already buffered in the sink,
             * never trigger new filtering activity */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                                AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* at EOF give the video path a chance to flush
                     * (may duplicate the last frame) */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            if (ost->finished) {
                /* stream already closed: discard the frame */
                av_frame_unref(filtered_frame);
                continue;
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->ch_layout.nb_channels != filtered_frame->ch_layout.nb_channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1389
/*
 * Print the end-of-run summary: per-media-type byte totals and muxing
 * overhead at INFO level, then verbose per-stream demux/mux counters,
 * and a warning when nothing at all was encoded.
 *
 * @param total_size  final size of the (first) output file in bytes,
 *                    or a non-positive value when unknown
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
        case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
        case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
        default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* pass1_used stays set only if every stream ran with exactly
         * AV_CODEC_FLAG_PASS1 (i.e. this was the first of two passes) */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* muxing overhead = container bytes beyond the raw payload */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        /* note: deliberately shadows the 'total_size' parameter */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* note: deliberately shadows the 'total_size' parameter */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, av_get_media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1506
/*
 * Print the periodic status line ("frame= ... fps= ... bitrate= ...")
 * and, when -progress is active, write machine-readable key=value
 * progress to progress_avio.  Called repeatedly from the transcode
 * loop; is_last_report prints the final line and the final stats.
 * Rate-limited by the global stats_period via static state.
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    /* persistent across calls: last report time, first-call flag, and
     * the -qphist histogram */
    static int64_t last_time = -1;
    static int first_report = 1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* throttle intermediate reports to one per stats_period; the first
     * report additionally waits until all output headers were dumped */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        if (((cur_time - last_time) < stats_period && !first_report) ||
            (first_report && nb_output_dumped < nb_output_files))
            return;
        last_time = cur_time;
    }

    /* wall-clock seconds since transcoding started */
    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* secondary video streams only report their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* the first video stream drives frame/fps/qp/psnr reporting */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;
            int64_t frame_number = ost->frame_number;

            fps = t > 1 ? frame_number / t : 0;
            /* 'fps < 9.95' picks the precision for %3.*f: one decimal
             * for small rates, none otherwise */
            av_bprintf(&buf, "frame=%5"PRId64" fps=%3.*f q=%3.1f ",
                       frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%"PRId64"\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* one hex digit per QP bucket, log2-compressed counts */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* accumulated error over the whole run */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        /* error of the most recent frame only */
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are subsampled to a quarter area */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            if (copy_ts) {
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                    copy_ts_first_pts = pts;
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
                    pts -= copy_ts_first_pts;
            }
        }

        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* split the output timestamp into h:m:s.us components */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%"PRId64" drop=%"PRId64, nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%"PRId64"\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%"PRId64"\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* '\r' rewrites the same terminal line between reports */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    first_report = 0;

    if (is_last_report)
        print_final_stats(total_size);
}
1716
/*
 * Seed an input filter's parameters from the stream's codec parameters.
 * Used when no frame was ever decoded, so the "format" comes straight
 * from libavformat rather than from an actual decoded frame.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
static int ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
{
    ifilter->format              = par->format;
    ifilter->width               = par->width;
    ifilter->height              = par->height;
    ifilter->sample_rate         = par->sample_rate;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;

    /* the channel layout may own allocated data, so deep-copy it;
     * av_channel_layout_copy() returns 0 on success */
    return av_channel_layout_copy(&ifilter->ch_layout, &par->ch_layout);
}
1734
flush_encoders(void)1735 static void flush_encoders(void)
1736 {
1737 int i, ret;
1738
1739 for (i = 0; i < nb_output_streams; i++) {
1740 OutputStream *ost = output_streams[i];
1741 AVCodecContext *enc = ost->enc_ctx;
1742 OutputFile *of = output_files[ost->file_index];
1743
1744 if (!ost->encoding_needed)
1745 continue;
1746
1747 // Try to enable encoding with no input frames.
1748 // Maybe we should just let encoding fail instead.
1749 if (!ost->initialized) {
1750 FilterGraph *fg = ost->filter->graph;
1751
1752 av_log(NULL, AV_LOG_WARNING,
1753 "Finishing stream %d:%d without any data written to it.\n",
1754 ost->file_index, ost->st->index);
1755
1756 if (ost->filter && !fg->graph) {
1757 int x;
1758 for (x = 0; x < fg->nb_inputs; x++) {
1759 InputFilter *ifilter = fg->inputs[x];
1760 if (ifilter->format < 0 &&
1761 ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar) < 0) {
1762 av_log(NULL, AV_LOG_ERROR, "Error copying paramerets from input stream\n");
1763 exit_program(1);
1764 }
1765 }
1766
1767 if (!ifilter_has_all_input_formats(fg))
1768 continue;
1769
1770 ret = configure_filtergraph(fg);
1771 if (ret < 0) {
1772 av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1773 exit_program(1);
1774 }
1775
1776 finish_output_stream(ost);
1777 }
1778
1779 init_output_stream_wrapper(ost, NULL, 1);
1780 }
1781
1782 if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
1783 continue;
1784
1785 ret = encode_frame(of, ost, NULL);
1786 if (ret != AVERROR_EOF)
1787 exit_program(1);
1788 }
1789 }
1790
1791 /*
1792 * Check whether a packet from ist should be written into ost at this time
1793 */
/*
 * Decide whether a packet from 'ist' is eligible to be written into
 * 'ost' right now.
 *
 * @return 1 if the packet should be written, 0 otherwise
 */
static int check_output_constraints(InputStream *ist, OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int src_index  = input_files[ist->file_index]->ist_index + ist->st->index;

    /* accept only packets from this stream's configured source, while
     * the muxer is still running, and not before the requested output
     * start time (-ss) */
    return ost->source_index == src_index    &&
           !(ost->finished & MUXER_FINISHED) &&
           !(of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time);
}
1810
/*
 * Forward one demuxed packet straight to the muxer (-c copy), rescaling
 * its timestamps into the output mux timebase and enforcing the
 * -ss/-t/-to windows.  A NULL pkt flushes the output bitstream filters
 * at end of stream.
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;

    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, opkt, ost, 1);
        return;
    }

    /* drop leading non-keyframes unless -copyinkf was given */
    if (!ost->streamcopy_started && !(pkt->flags & AV_PKT_FLAG_KEY) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* honor -ss: drop packets before the output start (unless -copypriorss) */
    if (!ost->streamcopy_started && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* honor -t/-to on the output file */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* honor -t on the input file */
    if (f->recording_time != INT64_MAX) {
        start_time = 0;
        if (copy_ts) {
            start_time += f->start_time != AV_NOPTS_VALUE ? f->start_time : 0;
            start_time += start_at_zero ? 0 : f->ctx->start_time;
        }
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        /* for audio, rescale dts sample-accurately to avoid rounding
         * drift across packets (av_rescale_delta keeps the remainder) */
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                     (AVRational){1, ist->dec_ctx->sample_rate}, duration,
                                     &ist->filter_in_rescale_delta_last, ost->mux_timebase);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt->dts -= ost_tb_start_time;

    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    ost->sync_opts += opkt->duration;

    output_packet(of, opkt, ost, 0);

    ost->streamcopy_started = 1;
}
1887
/*
 * If the decoder did not report a channel layout, pick the default
 * layout for its channel count (unless the count exceeds the
 * -guess_layout_max limit) and warn about the guess.
 *
 * @return 1 when a layout is known or was successfully guessed, 0 otherwise
 */
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;
    char layout_name[256];

    /* layout already known — nothing to do */
    if (dec->ch_layout.order != AV_CHANNEL_ORDER_UNSPEC)
        return 1;

    /* refuse to guess beyond the user-configured channel count limit */
    if (dec->ch_layout.nb_channels > ist->guess_layout_max)
        return 0;

    av_channel_layout_default(&dec->ch_layout, dec->ch_layout.nb_channels);
    if (dec->ch_layout.order == AV_CHANNEL_ORDER_UNSPEC)
        return 0;

    av_channel_layout_describe(&dec->ch_layout, layout_name, sizeof(layout_name));
    av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
           "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);

    return 1;
}
1906
/*
 * Book-keeping after a decode attempt: update the global decode error
 * statistics and, when a frame came out, warn (or abort with -xerror)
 * if the decoder flagged it as corrupt.
 */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    const int failed = ret < 0;

    /* count successes in slot 0 and failures in slot 1 */
    if (*got_output || failed)
        decode_error_stat[failed]++;

    if (failed && exit_on_error)
        exit_program(1);

    if (!*got_output || !ist)
        return;

    if (ist->decoded_frame->decode_error_flags ||
        (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt decoded frame in stream %d\n",
               input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
    }
}
1924
1925 // Filters can be configured only if the formats of all inputs are known.
/*
 * Return 1 when every audio/video input of the filtergraph has a known
 * format, 0 otherwise.  Other media types never block configuration.
 */
static int ifilter_has_all_input_formats(FilterGraph *fg)
{
    int i;

    for (i = 0; i < fg->nb_inputs; i++) {
        InputFilter *in = fg->inputs[i];
        int needs_format = in->type == AVMEDIA_TYPE_AUDIO ||
                           in->type == AVMEDIA_TYPE_VIDEO;

        if (needs_format && in->format < 0)
            return 0;
    }

    return 1;
}
1936
/*
 * Feed one decoded frame into its filtergraph input.
 *
 * Detects parameter changes (pixel/sample format, dimensions, sample
 * rate, channel layout, hw frames context, display matrix) that require
 * the graph to be reconfigured.  Frames arriving before all graph
 * inputs have a known format are buffered in the input's frame queue.
 *
 * @param keep_reference  if set, the caller keeps its reference and the
 *                        buffersrc gets its own copy
 * @return 0 on success, a negative AVERROR on failure
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame, int keep_reference)
{
    FilterGraph *fg = ifilter->graph;
    AVFrameSideData *sd;
    int need_reinit, ret;
    int buffersrc_flags = AV_BUFFERSRC_FLAG_PUSH;

    if (keep_reference)
        buffersrc_flags |= AV_BUFFERSRC_FLAG_KEEP_REF;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       av_channel_layout_compare(&ifilter->ch_layout, &frame->ch_layout);
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    /* -reinit_filter 0: once configured, tolerate parameter changes */
    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* a change of hw frames context always forces a reinit */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    /* so does a new or changed display matrix (rotation metadata) */
    if (sd = av_frame_get_side_data(frame, AV_FRAME_DATA_DISPLAYMATRIX)) { /* intentional assignment */
        if (!ifilter->displaymatrix || memcmp(sd->data, ifilter->displaymatrix, sizeof(int32_t) * 9))
            need_reinit = 1;
    } else if (ifilter->displaymatrix)
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        if (!ifilter_has_all_input_formats(fg)) {
            /* graph cannot be configured yet: queue a clone of the frame */
            AVFrame *tmp = av_frame_clone(frame);
            if (!tmp)
                return AVERROR(ENOMEM);

            ret = av_fifo_write(ifilter->frame_queue, &tmp, 1);
            if (ret < 0)
                av_frame_free(&tmp);

            return ret;
        }

        /* drain the old graph before tearing it down */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, buffersrc_flags);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2016
/* Mark a filtergraph input as finished and propagate EOF (with the given
 * pts in the stream time base) into the graph.  If the graph was never
 * configured, fall back on the codec parameters so configuration can
 * still happen; fail if the format remains unknown for audio/video. */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
{
    int err;

    ifilter->eof = 1;

    if (!ifilter->filter) {
        // the filtergraph was never configured
        if (ifilter->format < 0) {
            err = ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
            if (err < 0)
                return err;
        }
        if (ifilter->format < 0 &&
            (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
            av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
            return AVERROR_INVALIDDATA;
        }
        return 0;
    }

    err = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
    return err < 0 ? err : 0;
}
2042
2043 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2044 // There is the following difference: if you got a frame, you must call
2045 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2046 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
decode(AVCodecContext * avctx,AVFrame * frame,int * got_frame,AVPacket * pkt)2047 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2048 {
2049 int ret;
2050
2051 *got_frame = 0;
2052
2053 if (pkt) {
2054 ret = avcodec_send_packet(avctx, pkt);
2055 // In particular, we don't expect AVERROR(EAGAIN), because we read all
2056 // decoded frames with avcodec_receive_frame() until done.
2057 if (ret < 0 && ret != AVERROR_EOF)
2058 return ret;
2059 }
2060
2061 ret = avcodec_receive_frame(avctx, frame);
2062 if (ret < 0 && ret != AVERROR(EAGAIN))
2063 return ret;
2064 if (ret >= 0)
2065 *got_frame = 1;
2066
2067 return 0;
2068 }
2069
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream.  All but the last filter receive their own reference, so the
 * frame is consumed exactly once.  AVERROR_EOF from a filter is ignored;
 * any other error aborts the loop and is returned. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int ret;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (int i = 0; i < ist->nb_filters; i++) {
        int keep_ref = i < ist->nb_filters - 1;

        ret = ifilter_send_frame(ist->filters[i], decoded_frame, keep_ref);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2087
/* Decode one audio packet (or pull one more buffered frame when pkt == NULL),
 * derive a pts for the decoded frame and feed it to the filtergraphs.
 *
 * *got_output is set when a frame was produced; *decode_failed is set when
 * the failure came from the decoder itself (as opposed to post-decode
 * processing).  Returns the decoder/processing error code. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* pts source priority: decoder output, then packet pts, then the
     * stream-level dts estimate (the latter is in AV_TIME_BASE units) */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb = AV_TIME_BASE_Q;
    }
    /* a gap larger than the packet duration looks like a discontinuity:
     * reset the rescale-delta state before converting the pts */
    if (pkt && pkt->duration && ist->prev_pkt_pts != AV_NOPTS_VALUE &&
        pkt->pts != AV_NOPTS_VALUE && pkt->pts - ist->prev_pkt_pts > pkt->duration)
        ist->filter_in_rescale_delta_last = AV_NOPTS_VALUE;
    if (pkt)
        ist->prev_pkt_pts = pkt->pts;
    /* rescale the pts into the sample-rate time base, carrying rounding
     * error across calls via filter_in_rescale_delta_last */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2147
/* Decode one video packet (or pull one more buffered frame when pkt == NULL;
 * eof requests draining), compute a best-effort timestamp for the frame and
 * feed it to the filtergraphs.
 *
 * *duration_pts receives the frame duration in the stream time base;
 * *got_output and *decode_failed behave as in decode_audio(). */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame = ist->decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        // remember the current dts so frames flushed out of the decoder
        // without a timestamp can still get one (see below)
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                   decoded_frame->width,
                   decoded_frame->height,
                   decoded_frame->format,
                   ist->dec_ctx->width,
                   ist->dec_ctx->height,
                   ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    // download the frame from hardware memory before it enters the filters
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    // with a forced input framerate (-r), frames are stamped with a CFR counter
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    // while draining, fall back on the dts values remembered above
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2272
/* Decode one subtitle packet and dispatch the result: render it onto the
 * sub2video frame, queue it for later filtering, and/or send it to every
 * subtitle encoder fed by this stream.
 *
 * With -fix_sub_duration enabled, each subtitle is held back by one event
 * so its display duration can be clipped against the next subtitle's pts. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* an empty packet means EOF: flush any pending sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        /* end = remaining display time of the previous subtitle, in ms;
         * values <= 0 mean the previous subtitle is dropped entirely */
        int end = 1;
        if (ist->prev_sub.got_output) {
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* output the previous subtitle instead of the current one: swap the
         * freshly decoded event with the buffered one */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* no sub2video frame yet: queue the subtitle until one exists;
         * ownership moves to the queue, so do not free it below */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc2(8, sizeof(AVSubtitle), AV_FIFO_FLAG_AUTO_GROW);
        if (!ist->sub2video.sub_queue)
            exit_program(1);

        ret = av_fifo_write(ist->sub2video.sub_queue, &subtitle, 1);
        if (ret < 0)
            exit_program(1);
        free_sub = 0;
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2347
/* Propagate EOF to every filtergraph input fed by this stream, converting
 * the stream's pts estimate back into the stream time base first. */
static int send_filter_eof(InputStream *ist)
{
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    int i;

    for (i = 0; i < ist->nb_filters; i++) {
        int err = ifilter_send_eof(ist->filters[i], pts);
        if (err < 0)
            return err;
    }
    return 0;
}
2362
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Process one input packet for a stream: feed it to the decoder(s) and drain
 * all resulting frames into the filtergraphs, or (for streamcopy) advance the
 * timestamp estimates and forward the packet to the matching outputs.
 *
 * Returns 0 once the stream has reached EOF, non-zero otherwise.
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt = ist->pkt;

    /* initialize the first_dts/dts/pts estimates on the first packet */
    if (!ist->saw_first_ts) {
        ist->first_dts =
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->first_dts =
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* take a private reference so the decode loop can unref avpkt freely */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* after the first iteration (repeating), pass NULL to drain
             * further frames buffered in the decoder */
            ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
                                   &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* estimate the duration of this frame for the dts counter:
                 * packet duration first, then the decoder framerate */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* prefer the frame's own duration over the dts estimate */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    } else if (!ist->decoding_needed)
        eof_reached = 1;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2551
/* AVCodecContext.get_format() callback: choose the decoder's output pixel
 * format.  Walks the candidate list, trying to initialize a hwaccel for each
 * hardware format the configured device type supports; returns the first
 * candidate that either initialized successfully or is a software format. */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* first non-hwaccel candidate: take it (software fallback) */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* look for a decoder hw config using a device context that
             * produces this pixel format */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config && config->device_type == ist->hwaccel_device_type) {
            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* an explicitly requested hwaccel that fails to init is
                 * fatal; with -hwaccel auto we just try the next format */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }

            ist->hwaccel_pix_fmt = *p;
            break;
        }
    }

    return *p;
}
2600
/* Open the decoder for input stream ist_index (when decoding is needed),
 * wiring up the get_format callback, decoder options and hw device.
 *
 * On failure writes a human-readable message into error (error_len bytes)
 * and returns a negative AVERROR code; returns 0 on success. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                     avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
#if LIBAVCODEC_VERSION_MAJOR < 60
        AV_NOWARN_DEPRECATED({
        ist->dec_ctx->thread_safe_callbacks = 1;
        })
#endif

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any decoder options left over were not consumed: abort */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2665
get_input_stream(OutputStream * ost)2666 static InputStream *get_input_stream(OutputStream *ost)
2667 {
2668 if (ost->source_index >= 0)
2669 return input_streams[ost->source_index];
2670 return NULL;
2671 }
2672
/* qsort()-compatible three-way comparison of two int64_t values:
 * returns -1, 0 or 1 (the expansion of FFDIFFSIGN). */
static int compare_int64(const void *a, const void *b)
{
    int64_t va = *(const int64_t *)a;
    int64_t vb = *(const int64_t *)b;

    return (va > vb) - (va < vb);
}
2677
/* Initialize the bitstream filter attached to an output stream, if any,
 * propagating the stream parameters and time base through it. */
static int init_output_bsfs(OutputStream *ost)
{
    AVBSFContext *ctx = ost->bsf_ctx;
    int err;

    /* nothing to do without a bitstream filter */
    if (!ctx)
        return 0;

    err = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
    if (err < 0)
        return err;

    ctx->time_base_in = ost->st->time_base;

    err = av_bsf_init(ctx);
    if (err < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
               ctx->filter->name);
        return err;
    }

    /* the filter may have rewritten the parameters and time base */
    err = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (err < 0)
        return err;
    ost->st->time_base = ctx->time_base_out;

    return 0;
}
2706
/* Initialize an output stream in streamcopy mode: copy the codec parameters,
 * codec tag, framerate, time base, duration, side data and aspect ratio from
 * the source stream, applying any user overrides (-aspect, rotation, codec
 * tag) along the way.  Returns 0 on success, a negative AVERROR on failure. */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    av_assert0(ist && !ost->filter);

    /* round-trip the input parameters through enc_ctx so user-supplied
     * encoder options can override individual fields */
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    if (!codec_tag) {
        /* keep the source tag only when the output format cannot map the
         * codec id to a tag of its own */
        unsigned int codec_tag_tmp;
        if (!of->format->codec_tag ||
            av_codec_get_id (of->format->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->format->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;

    if (ost->frame_rate.num)
        ost->st->avg_frame_rate = ost->frame_rate;
    else
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->format, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0) {
        if (ost->frame_rate.num)
            ost->st->time_base = av_inv_q(ost->frame_rate);
        else
            ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});
    }

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    /* copy all stream-level side data */
    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    /* user-requested rotation (e.g. -metadata:s:v rotate) overrides any
     * display matrix copied above */
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
2825
/* Set the "encoder" metadata tag on an output stream unless the user
 * already supplied one.  The value is "<LIBAVCODEC_IDENT> <codec>" or, when
 * bitexact output is requested via format or codec flags, "Lavc <codec>". */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    const AVDictionaryEntry *e;
    const char *prefix;
    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    /* a user-provided tag always wins */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* evaluate -fflags to detect a bitexact request at the muxer level */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    /* likewise for the encoder-level -flags */
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    prefix = ((format_flags & AVFMT_FLAG_BITEXACT) ||
              (codec_flags & AV_CODEC_FLAG_BITEXACT))
             ? "Lavc " : LIBAVCODEC_IDENT " ";

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    av_strlcpy(encoder_string, prefix, encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder", encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
2866
/* Parse the -force_key_frames specification kf (a comma-separated list of
 * times, where an entry starting with "chapters" expands to every chapter
 * start plus an optional offset) into a sorted array of pts values in the
 * encoder time base, stored in ost->forced_kf_pts/forced_kf_count.
 *
 * NOTE: kf is modified in place (commas are replaced by NULs). */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* count entries: one more than the number of commas */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        if (!memcmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* one chapter entry replaces itself with nb_chapters entries:
             * grow the array accordingly (guarding against int overflow) */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional offset after the "chapters" keyword */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts   = pts;
}
2929
/* Pick the encoder timebase for 'ost': an explicit -enc_time_base value
 * wins; a negative request means "copy the input stream's timebase"; in
 * every other case fall back to 'default_time_base'. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    AVCodecContext *enc_ctx = ost->enc_ctx;
    InputStream *ist        = get_input_stream(ost);

    /* num > 0: the user asked for this exact timebase. */
    if (ost->enc_timebase.num > 0) {
        enc_ctx->time_base = ost->enc_timebase;
        return;
    }

    /* num < 0: mirror the input stream's timebase when one exists. */
    if (ost->enc_timebase.num < 0) {
        if (ist) {
            enc_ctx->time_base = ist->st->time_base;
            return;
        }

        av_log(output_files[ost->file_index]->ctx, AV_LOG_WARNING,
               "Input stream data not available, using default time base\n");
    }

    enc_ctx->time_base = default_time_base;
}
2953
/* Configure the encoder context (and related stream fields) of an encoded
 * output stream from its filtergraph buffersink and, where available, the
 * decoder context of the corresponding input stream. 'frame' may be NULL;
 * when present it supplies color properties and field-order hints.
 * Returns 0 on success or a negative AVERROR code. */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    OutputFile *of = output_files[ost->file_index];
    AVFormatContext *oc = of->ctx;
    int ret;

    set_encoder_id(output_files[ost->file_index], ost);

    if (ist) {
        dec_ctx = ist->dec_ctx;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* Frame rate selection: user option first, then the filter sink,
         * then a hard-coded 25 fps fallback when nothing is known. */
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        /* Clamp to the user's maximum; also applies when the rate is still
         * unset (den == 0). */
        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
             !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        /* Snap to the nearest rate the encoder supports, unless -force_fps. */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Audio format, rate and channel layout come from the buffersink. */
        enc_ctx->sample_fmt = av_buffersink_get_format(ost->filter->filter);
        enc_ctx->sample_rate = av_buffersink_get_sample_rate(ost->filter->filter);
        ret = av_buffersink_get_ch_layout(ost->filter->filter, &enc_ctx->ch_layout);
        if (ret < 0)
            return ret;

        /* bits_per_raw_sample: user setting wins; otherwise inherit from
         * the decoder when the filter graph is metadata-only, capped at
         * the sample format's bit width. */
        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        /* Warn about very fine timebases (> 1000 ticks/s) when CFR output
         * would force one packet per tick. */
        if ( av_q2d(enc_ctx->time_base) < 0.001 && ost->vsync_method != VSYNC_PASSTHROUGH
           && (ost->vsync_method == VSYNC_CFR || ost->vsync_method == VSYNC_VSCFR ||
               (ost->vsync_method == VSYNC_AUTO && !(of->format->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or "
                                       "setting vsync/fps_mode to vfr\n");
        }

        /* Geometry and aspect ratio from the filter sink; -aspect overrides
         * the sample aspect ratio. */
        enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);

        /* Same bits_per_raw_sample policy as the audio branch, but capped
         * at the pixel format's component depth. */
        if (ost->bits_per_raw_sample)
            enc_ctx->bits_per_raw_sample = ost->bits_per_raw_sample;
        else if (dec_ctx && ost->filter->graph->is_meta)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        /* Color properties are taken from the first frame when available. */
        if (frame) {
            enc_ctx->color_range = frame->color_range;
            enc_ctx->color_primaries = frame->color_primaries;
            enc_ctx->color_trc = frame->color_trc;
            enc_ctx->colorspace = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        /* -force_key_frames: either an "expr:" expression evaluated per
         * frame, a static timestamp list, or "source" (handled elsewhere). */
        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        /* Inherit canvas size from the input stream when unset. */
        if (!enc_ctx->width) {
            enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3116
/* Fully initialize one output stream: for transcoded streams configure and
 * open the encoder and copy its parameters/side data to the muxer stream;
 * for stream copy delegate to init_output_stream_streamcopy(); then set up
 * bitstream filters and let the owning file check whether its header can be
 * written. On failure a message may be written into 'error' (up to
 * error_len bytes); returns 0 on success or a negative AVERROR code. */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        /* Default to automatic thread count unless the user set one. */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* Subtitle transcoding is only possible text->text or
         * bitmap->bitmap; reject mixed combinations before opening. */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        /* Fixed-frame-size audio encoders dictate the buffersink's
         * output frame size. */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* Copy side data produced by the opened encoder into the stream. */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* With autorotate the rotation was applied by a filter,
                     * so reset the copied display matrix to identity. */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* NOTE(review): of_check_init presumably writes the file header once
     * every stream of the file is initialized — confirm in its definition. */
    ret = of_check_init(output_files[ost->file_index]);
    if (ret < 0)
        return ret;

    return ret;
}
3260
/* Log a warning the first time a packet arrives for a stream that appeared
 * after the initial probe of input file 'input_index'. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st    = file->ctx->streams[pkt->stream_index];

    /* Indices below the watermark have already been reported once. */
    if (pkt->stream_index >= file->nb_streams_warn) {
        av_log(file->ctx, AV_LOG_WARNING,
               "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
               av_get_media_type_string(st->codecpar->codec_type),
               input_index, pkt->stream_index,
               pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
        file->nb_streams_warn = pkt->stream_index + 1;
    }
}
3275
/* One-time setup before the main transcoding loop: bind complex
 * filtergraph outputs to their input streams, start framerate-emulation
 * clocks, open decoders, initialize stream-copy/subtitle/data output
 * streams, discard unused programs, write headers of stream-less output
 * files, and finally dump the stream mapping. Returns 0 on success or a
 * negative AVERROR code (the mapping is dumped even on failure). */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* For outputs fed by a single-input complex filtergraph, record the
     * graph's input stream as the output's source_index. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->readrate || ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* A decoder failed to open: close all encoder contexts and
             * fall through to dump the mapping before returning. */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /*
     * initialize stream copy and subtitle/data streams.
     * Encoded AVFrame based streams will get initialized as follows:
     * - when the first AVFrame is received in do_video_out
     * - just before the first AVFrame is received in either transcode_step
     *   or reap_filters due to us requiring the filter chain buffer sink
     *   to be configured with the correct audio frame size, which is only
     *   known after the encoder is initialized.
     */
    for (i = 0; i < nb_output_streams; i++) {
        if (!output_streams[i]->stream_copy &&
            (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
             output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
            continue;

        ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard = AVDISCARD_ALL;

            /* Keep the program if at least one of its streams is used. */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (output_files[i]->format->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = of_check_init(output_files[i]);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    /* Inputs feeding complex filtergraphs. */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    /* One mapping line per output stream: attachment, complex-graph
     * output, or plain input->output (with codec names when encoding). */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            const AVCodec *in_codec = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec = ost->enc;
            const char *decoder_name = "?";
            const char *in_codec_name = "?";
            const char *encoder_name = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            /* Print "native" when the codec implementation name matches
             * the codec-id descriptor name. */
            if (in_codec) {
                decoder_name = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    atomic_store(&transcode_init_done, 1);

    return 0;
}
3456
3457 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
need_output(void)3458 static int need_output(void)
3459 {
3460 int i;
3461
3462 for (i = 0; i < nb_output_streams; i++) {
3463 OutputStream *ost = output_streams[i];
3464 OutputFile *of = output_files[ost->file_index];
3465 AVFormatContext *os = output_files[ost->file_index]->ctx;
3466
3467 if (ost->finished ||
3468 (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3469 continue;
3470 if (ost->frame_number >= ost->max_frames) {
3471 int j;
3472 for (j = 0; j < of->ctx->nb_streams; j++)
3473 close_output_stream(output_streams[of->ost_index + j]);
3474 continue;
3475 }
3476
3477 return 1;
3478 }
3479
3480 return 0;
3481 }
3482
3483 /**
3484 * Select the output stream to process.
3485 *
3486 * @return selected output stream, or NULL if none available
3487 */
choose_output(void)3488 static OutputStream *choose_output(void)
3489 {
3490 int i;
3491 int64_t opts_min = INT64_MAX;
3492 OutputStream *ost_min = NULL;
3493
3494 for (i = 0; i < nb_output_streams; i++) {
3495 OutputStream *ost = output_streams[i];
3496 int64_t opts = ost->last_mux_dts == AV_NOPTS_VALUE ? INT64_MIN :
3497 av_rescale_q(ost->last_mux_dts, ost->st->time_base,
3498 AV_TIME_BASE_Q);
3499 if (ost->last_mux_dts == AV_NOPTS_VALUE)
3500 av_log(NULL, AV_LOG_DEBUG,
3501 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3502 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3503
3504 if (!ost->initialized && !ost->inputs_done)
3505 return ost->unavailable ? NULL : ost;
3506
3507 if (!ost->finished && opts < opts_min) {
3508 opts_min = opts;
3509 ost_min = ost->unavailable ? NULL : ost;
3510 }
3511 }
3512 return ost_min;
3513 }
3514
/* Enable or disable terminal echo on stdin; a no-op without termios
 * support or when stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3526
/* Poll for an interactive keypress (at most every 100ms of cur_time) and
 * act on it: quit, verbosity changes, packet dumping, filter commands and
 * debug-level control. Returns AVERROR_EXIT when the user quit or a
 * signal was received, 0 otherwise. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if (cur_time - last_time >= 100000) {
        key = read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q') {
        av_log(NULL, AV_LOG_INFO, "\n\n[q] command received. Exiting.\n\n");
        return AVERROR_EXIT;
    }
    /* '+'/'-' adjust log verbosity; 's' toggles the QP histogram. */
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist ^= 1;
    /* 'h' cycles packet dump -> packet+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    /* 'c'/'C': read a filter command from the terminal; 'c' sends to the
     * first matching filter only, 'C' to all (or queues for later). */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* time < 0 means "execute now". */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles through debug flag bits (skipping FF_DEBUG_DCT_COEFF);
     * 'd' prompts for a numeric value. The result is applied to every
     * decoder and encoder context. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key function\n"
                        "? show this help\n"
                        "+ increase verbosity\n"
                        "- decrease verbosity\n"
                        "c Send command to first matching filter supporting it\n"
                        "C Send/Queue command to all matching filters\n"
                        "D cycle through available debug modes\n"
                        "h dump packets/hex press to cycle through the 3 states\n"
                        "q quit\n"
                        "s Show QP histogram\n"
        );
    }
    return 0;
}
3641
3642 #if HAVE_THREADS
/* Per-input-file reader thread: pulls packets from the demuxer and hands
 * them to the main thread through f->in_thread_queue. Terminates on read
 * error or EOF, propagating the error code to the queue's receive side. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    AVPacket *pkt = f->pkt, *queue_pkt;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        ret = av_read_frame(f->ctx, pkt);

        if (ret == AVERROR(EAGAIN)) {
            /* Demuxer has no data right now: back off briefly and retry. */
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* EOF or fatal error: report it to the consumer and stop. */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        /* The queue carries AVPacket pointers, so move the data into a
         * freshly allocated packet before sending. */
        queue_pkt = av_packet_alloc();
        if (!queue_pkt) {
            av_packet_unref(pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
            break;
        }
        av_packet_move_ref(queue_pkt, pkt);
        ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* Non-blocking send hit a full queue: warn once, then switch
             * to blocking sends for the rest of this thread's lifetime. */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_free(&queue_pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
3690
/* Shut down and join the reader thread of input file i, then free its
 * message queue. The ordering matters: flagging the send side with
 * AVERROR_EOF unblocks the thread, and draining the queue frees the
 * packets it already produced before the queue itself is destroyed. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket *pkt;

    /* Nothing to do when threading was never set up for this file. */
    if (!f || !f->in_thread_queue)
        return;
    av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_free(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
3706
free_input_threads(void)3707 static void free_input_threads(void)
3708 {
3709 int i;
3710
3711 for (i = 0; i < nb_input_files; i++)
3712 free_input_thread(i);
3713 }
3714
init_input_thread(int i)3715 static int init_input_thread(int i)
3716 {
3717 int ret;
3718 InputFile *f = input_files[i];
3719
3720 if (f->thread_queue_size < 0)
3721 f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
3722 if (!f->thread_queue_size)
3723 return 0;
3724
3725 if (f->ctx->pb ? !f->ctx->pb->seekable :
3726 strcmp(f->ctx->iformat->name, "lavfi"))
3727 f->non_blocking = 1;
3728 ret = av_thread_message_queue_alloc(&f->in_thread_queue,
3729 f->thread_queue_size, sizeof(f->pkt));
3730 if (ret < 0)
3731 return ret;
3732
3733 if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
3734 av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
3735 av_thread_message_queue_free(&f->in_thread_queue);
3736 return AVERROR(ret);
3737 }
3738
3739 return 0;
3740 }
3741
init_input_threads(void)3742 static int init_input_threads(void)
3743 {
3744 int i, ret;
3745
3746 for (i = 0; i < nb_input_files; i++) {
3747 ret = init_input_thread(i);
3748 if (ret < 0)
3749 return ret;
3750 }
3751 return 0;
3752 }
3753
/* Pop one packet from the reader thread's queue; in non-blocking mode the
 * call returns AVERROR(EAGAIN) instead of waiting. */
static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
{
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;

    return av_thread_message_queue_recv(f->in_thread_queue, pkt, flags);
}
3760 #endif
3761
/* Fetch the next packet from input file 'f', either via the reader
 * thread's queue or directly from the demuxer. Enforces -re / -readrate
 * pacing first: returns AVERROR(EAGAIN) while any stream's DTS is ahead
 * of the scaled wall clock. */
static int get_input_packet(InputFile *f, AVPacket **pkt)
{
    if (f->readrate || f->rate_emu) {
        int i;
        /* Baseline offset; nonzero only with -copyts (and further zeroed
         * by -start_at_zero for the container start time). */
        int64_t file_start = copy_ts * (
                              (f->ctx->start_time != AV_NOPTS_VALUE ? f->ctx->start_time * !start_at_zero : 0) +
                              (f->start_time != AV_NOPTS_VALUE ? f->start_time : 0)
                             );
        /* -re means 1x realtime; -readrate carries an explicit factor. */
        float scale = f->rate_emu ? 1.0 : f->readrate;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t stream_ts_offset, pts, now;
            /* Skip streams without packets yet, and streams whose decoder
             * has not produced output yet. */
            if (!ist->nb_packets || (ist->decoding_needed && !ist->got_output)) continue;
            stream_ts_offset = FFMAX(ist->first_dts != AV_NOPTS_VALUE ? ist->first_dts : 0, file_start);
            pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            now = (av_gettime_relative() - ist->start) * scale + stream_ts_offset;
            /* Stream is ahead of the clock: ask the caller to retry later. */
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    *pkt = f->pkt;
    return av_read_frame(f->ctx, *pkt);
}
3790
got_eagain(void)3791 static int got_eagain(void)
3792 {
3793 int i;
3794 for (i = 0; i < nb_output_streams; i++)
3795 if (output_streams[i]->unavailable)
3796 return 1;
3797 return 0;
3798 }
3799
reset_eagain(void)3800 static void reset_eagain(void)
3801 {
3802 int i;
3803 for (i = 0; i < nb_input_files; i++)
3804 input_files[i]->eagain = 0;
3805 for (i = 0; i < nb_output_streams; i++)
3806 output_streams[i]->unavailable = 0;
3807 }
3808
3809 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
                               AVRational time_base)
{
    /* Take the new value (and its time base) when nothing was recorded
     * yet, or when it compares larger than the stored duration. The
     * short-circuit avoids the comparison for an unset duration. */
    if (!*duration ||
        av_compare_ts(*duration, time_base, tmp, tmp_time_base) < 0) {
        *duration = tmp;
        return tmp_time_base;
    }

    return time_base;
}
3828
/* Rewind input file 'ifile' for -stream_loop: seek back to its start and
 * fold the duration of the finished iteration into ifile->duration /
 * ifile->time_base so timestamps keep increasing across loops. Returns 0
 * on success or a negative AVERROR from the seek. */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    /* First pass: detect whether any audio stream decoded samples. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    /* Second pass: compute each stream's last-frame duration and combine
     * it with the stream span into the file's loop duration. */
    for (i = 0; i < ifile->nb_streams; i++) {
        ist = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* No decoded audio: estimate the last video frame's length
             * from the forced or average frame rate; default to 1 tick. */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* A positive loop count is finite: consume one iteration. */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
3887
/*
 * Read one packet from the input file with the given index, apply all
 * timestamp fixups (wrap correction, start-time/ts_offset shifting,
 * ts_scale, loop-duration offset, discontinuity correction) and hand it
 * to process_input_packet() for decoding or streamcopy.
 *
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket *pkt;
    int ret, thread_ret, i, j;  /* thread_ret is only used when HAVE_THREADS */
    int64_t duration;
    int64_t pkt_dts;
    int disable_discontinuity_correction = copy_ts;

    is = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* EOF/error while loop iterations remain: flush the decoders, seek the
     * file back to its start and try reading again. */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->processing_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                if (ist->decoding_needed)
                    avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        /* The input thread owns the demuxer; stop it around the seek. */
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        /* Real end of file: flush every stream of this input once. */
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->processing_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        /* Report EAGAIN rather than EOF so the caller keeps draining the
         * remaining input files; eof_reached prevents re-selection. */
        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
                         is->streams[pkt->stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt->stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt->stream_index];

    /* Per-stream statistics used by the progress report. */
    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* Timestamp wrap handling for containers with limited pts_wrap_bits
     * (e.g. MPEG-TS 33-bit timestamps). */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* If a timestamp lies more than half a wrap period past the start
         * time, shift it back one full period; leaving wrap_correction_done
         * at 0 retries the check on subsequent packets. */
        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* Do not overwrite side data the packet already carries. */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* Apply the per-file timestamp offset (input -ss / discontinuity
     * corrections), rescaled into the stream time base. */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    /* Apply the user-supplied timestamp scale (-itsscale). */
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    /* Inter-stream discontinuity: the very first dts of this stream is far
     * from the file's last seen timestamp; fold the gap into ts_offset. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* When looping, shift timestamps past the accumulated duration of the
     * previous iterations and track the stream's pts extremes. */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* With -copyts, still allow discontinuity correction when the dts looks
     * like a timestamp wrap (the wrapped value lands close to next_dts). */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* Intra-stream discontinuity handling against the predicted next_dts. */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            /* Discontinuous formats: absorb large jumps into ts_offset. */
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            /* Continuous formats: drop wildly invalid timestamps instead. */
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    /* Remember the last timestamp seen in this file for the inter-stream
     * discontinuity check above. */
    if (pkt->dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s duration:%s duration_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
#if HAVE_THREADS
    /* With an input thread the packet was allocated by the thread queue
     * and must be freed; otherwise it is ifile->pkt and is only unreffed. */
    if (ifile->thread_queue_size)
        av_packet_free(&pkt);
    else
#endif
    av_packet_unref(pkt);

    return 0;
}
4183
/**
 * Perform a step of transcoding for the specified filter graph.
 *
 * @param[in]  graph     filter graph to consider
 * @param[out] best_ist  input stream where a frame would allow to continue
 * @return  0 for success, <0 for error
 */
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
{
    int i, ret;
    int nb_requests, nb_requests_max = 0;
    InputFilter *ifilter;
    InputStream *ist;

    *best_ist = NULL;
    ret = avfilter_graph_request_oldest(graph->graph);
    if (ret >= 0)
        return reap_filters(0);

    if (ret == AVERROR_EOF) {
        /* Graph fully drained: flush remaining frames and close outputs. */
        ret = reap_filters(1);
        for (i = 0; i < graph->nb_outputs; i++)
            close_output_stream(graph->outputs[i]->ost);
        return ret;
    }
    if (ret != AVERROR(EAGAIN))
        return ret;

    /* EAGAIN: the graph needs input.  Pick the input stream whose buffer
     * source has failed the most frame requests, skipping inputs whose
     * files are stalled (eagain) or already finished. */
    for (i = 0; i < graph->nb_inputs; i++) {
        ifilter = graph->inputs[i];
        ist = ifilter->ist;
        if (input_files[ist->file_index]->eagain ||
            input_files[ist->file_index]->eof_reached)
            continue;
        nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
        if (nb_requests > nb_requests_max) {
            nb_requests_max = nb_requests;
            *best_ist = ist;
        }
    }

    /* Nothing feedable right now: mark the graph's outputs unavailable so
     * choose_output() does not keep selecting them this iteration. */
    if (!*best_ist)
        for (i = 0; i < graph->nb_outputs; i++)
            graph->outputs[i]->ost->unavailable = 1;

    return 0;
}
4231
/**
 * Run a single step of transcoding.
 *
 * Chooses the neediest output stream, determines which input stream must
 * provide data for it, reads and processes one packet from that input,
 * then reaps any frames the filtergraphs produced.
 *
 * @return 0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream *ist = NULL;
    int ret;

    ost = choose_output();
    if (!ost) {
        /* No selectable output.  If some streams merely hit EAGAIN, clear
         * the flags, back off briefly and let the caller retry. */
        if (got_eagain()) {
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* Lazily (re)configure the filtergraph once all of its inputs know
     * their formats. */
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Graph not configured yet: feed any input that has not produced
         * output and whose file is not finished. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* Streamcopy: the source input stream is fixed. */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4323
/*
 * The following code is the main loop of the file converter
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* Main loop: one transcode_step() per iteration until all outputs are
     * done, an error occurs, or the user interrupts. */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed */
    for (i = 0; i < nb_output_files; i++) {
        ret = of_write_trailer(output_files[i]);
        if (ret < 0 && exit_on_error)
            exit_program(1);
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close the output files */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (os && os->oformat && !(os->oformat->flags & AVFMT_NOFILE)) {
            if ((ret = avio_closep(&os->pb)) < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error closing file %s: %s\n", os->url, av_err2str(ret));
                if (exit_on_error)
                    exit_program(1);
            }
        }
    }

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        /* Honor -abort_on empty_output_stream. */
        if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    /* Honor -abort_on empty_output. */
    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    hw_device_free_all();

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    /* Safe to call again: also covers the early-failure paths. */
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
            }
        }
    }
    return ret;
}
4471
get_benchmark_time_stamps(void)4472 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4473 {
4474 BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4475 #if HAVE_GETRUSAGE
4476 struct rusage rusage;
4477
4478 getrusage(RUSAGE_SELF, &rusage);
4479 time_stamps.user_usec =
4480 (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4481 time_stamps.sys_usec =
4482 (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4483 #elif HAVE_GETPROCESSTIMES
4484 HANDLE proc;
4485 FILETIME c, e, k, u;
4486 proc = GetCurrentProcess();
4487 GetProcessTimes(proc, &c, &e, &k, &u);
4488 time_stamps.user_usec =
4489 ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4490 time_stamps.sys_usec =
4491 ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4492 #else
4493 time_stamps.user_usec = time_stamps.sys_usec = 0;
4494 #endif
4495 return time_stamps;
4496 }
4497
/* Return the process's peak memory usage in bytes, or 0 when the platform
 * offers no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss has kilobyte granularity here; scale to bytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4515
/* Entry point: parse options, open inputs/outputs, run the transcode loop
 * and report benchmark/decoding statistics before exiting. */
int main(int argc, char **argv)
{
    int i, ret;
    BenchmarkTimeStamps ti;

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    /* Apply -loglevel early so option parsing itself logs correctly. */
    parse_loglevel(argc, argv, options);

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

    /* Only print an SDP when every output is an RTP stream. */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->format->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = get_benchmark_time_stamps();
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        int64_t utime, stime, rtime;
        current_time = get_benchmark_time_stamps();
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec  - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        av_log(NULL, AV_LOG_INFO,
               "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
               utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* Exit 69 when the decode error rate exceeds -max_error_rate. */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
4580