• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswresample/swresample.h"
46 #include "libavutil/opt.h"
47 #include "libavutil/channel_layout.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
65 #include "libavutil/threadmessage.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
const char program_name[] = "ffmpeg";
const int program_birth_year = 2000;

/* Destination for per-frame video statistics (-vstats); NULL when unused. */
static FILE *vstats_file;

/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
const char *const forced_keyframes_const_names[] = {
    "n",
    "n_forced",
    "prev_forced_n",
    "prev_forced_t",
    "t",
    NULL
};

/* Wall-clock, user-CPU and system-CPU timestamps in microseconds,
 * used by the -benchmark/-benchmark_all reporting. */
typedef struct BenchmarkTimeStamps {
    int64_t real_usec;
    int64_t user_usec;
    int64_t sys_usec;
} BenchmarkTimeStamps;

/* Forward declarations for helpers defined later in this file. */
static void do_video_stats(OutputStream *ost, int frame_size);
static BenchmarkTimeStamps get_benchmark_time_stamps(void);
static int64_t getmaxrss(void);
static int ifilter_has_all_input_formats(FilterGraph *fg);

static int run_as_daemon  = 0;
static int nb_frames_dup = 0;       /* running count of duplicated frames */
static unsigned dup_warning = 1000; /* presumably the next dup-count warning threshold — confirm at use site */
static int nb_frames_drop = 0;      /* running count of dropped frames */
static int64_t decode_error_stat[2];
static unsigned nb_output_dumped = 0;

static int want_sdp = 1;

/* Snapshot taken by update_benchmark(); deltas are computed against it. */
static BenchmarkTimeStamps current_time;
AVIOContext *progress_avio = NULL;

static uint8_t *subtitle_out;

/* Global registries of all input/output streams and files; entries and the
 * arrays themselves are freed in ffmpeg_cleanup(). */
InputStream **input_streams = NULL;
int        nb_input_streams = 0;
InputFile   **input_files   = NULL;
int        nb_input_files   = 0;

OutputStream **output_streams = NULL;
int         nb_output_streams = 0;
OutputFile   **output_files   = NULL;
int         nb_output_files   = 0;

FilterGraph **filtergraphs;
int        nb_filtergraphs;

#if HAVE_TERMIOS_H

/* init terminal so that we can grab keys */
static struct termios oldtty; /* terminal state saved by term_init() */
static int restore_tty;       /* nonzero once oldtty holds a valid state */
#endif

#if HAVE_THREADS
static void free_input_threads(void);
#endif
171 
172 /* sub2video hack:
173    Convert subtitles to video with alpha to insert them in filter graphs.
174    This is a temporary solution until libavfilter gets real subtitles support.
175  */
176 
/* Allocate and zero the sub2video canvas frame for this input stream.
 * The canvas is RGB32, sized from the decoder when it reports dimensions,
 * otherwise from the configured sub2video size.
 * Returns 0 on success, a negative AVERROR code on allocation failure. */
static int sub2video_get_blank_frame(InputStream *ist)
{
    AVFrame *frame = ist->sub2video.frame;
    int err;

    av_frame_unref(frame);

    frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    frame->format = AV_PIX_FMT_RGB32;

    err = av_frame_get_buffer(frame, 0);
    if (err < 0)
        return err;

    /* Start from an all-zero (fully transparent) canvas. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0;
}
191 
/* Blit one paletted bitmap subtitle rectangle onto the RGB32 canvas.
 * dst/dst_linesize describe the canvas, w/h its dimensions; rectangles
 * that are not bitmaps or do not fit inside the canvas are skipped with
 * a warning. r->data[0] holds palette indices, r->data[1] the palette. */
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
                                AVSubtitleRect *r)
{
    uint32_t *pal;
    uint8_t *src_row, *dst_row;
    int row, col;

    if (r->type != SUBTITLE_BITMAP) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
        return;
    }
    if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
        av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
            r->x, r->y, r->w, r->h, w, h
        );
        return;
    }

    /* Top-left corner of the rectangle inside the canvas (4 bytes/pixel). */
    dst_row = dst + r->y * dst_linesize + r->x * 4;
    src_row = r->data[0];
    pal     = (uint32_t *)r->data[1];

    for (row = 0; row < r->h; row++) {
        uint32_t *out = (uint32_t *)dst_row;
        uint8_t  *in  = src_row;
        for (col = 0; col < r->w; col++)
            out[col] = pal[in[col]];
        dst_row += dst_linesize;
        src_row += r->linesize[0];
    }
}
222 
/* Push the current sub2video canvas frame, stamped with 'pts', into every
 * filtergraph input fed by this stream, and remember the pts as the last
 * one sent. The frame is kept (KEEP_REF) so it can be re-sent on later
 * heartbeats. */
static void sub2video_push_ref(InputStream *ist, int64_t pts)
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    /* The canvas must have been allocated by sub2video_get_blank_frame(). */
    av_assert1(frame->data[0]);
    ist->sub2video.last_pts = frame->pts = pts;
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
                                           AV_BUFFERSRC_FLAG_KEEP_REF |
                                           AV_BUFFERSRC_FLAG_PUSH);
        /* EOF from a closed buffer source is expected and not worth a warning. */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
240 
/* Render a subtitle onto the sub2video canvas and push it to the stream's
 * filtergraphs. With sub == NULL this acts as a heartbeat/flush: a blank
 * canvas is sent, starting either at heartbeat_pts (first time) or at the
 * previous subpicture's end time.
 *
 * Fix: 'dst' was declared int8_t* but frame->data[0] is uint8_t* and
 * sub2video_copy_rect() takes uint8_t* — an incompatible pointer type. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    uint8_t *dst;
    int     dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return;
    if (sub) {
        /* Subtitle display times are in ms relative to sub->pts (AV_TIME_BASE);
           rescale both endpoints into the stream's time base. */
        pts       = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        end_pts   = av_rescale_q(sub->pts + sub->end_display_time   * 1000LL,
                                 AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts       = ist->sub2video.initialize ?
                    heartbeat_pts : ist->sub2video.end_pts;
        end_pts   = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        av_log(ist->dec_ctx, AV_LOG_ERROR,
               "Impossible to get a blank canvas.\n");
        return;
    }
    dst          = frame->data    [0];
    dst_linesize = frame->linesize[0];
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
280 
/* Called whenever a packet is read from 'ist''s file: keep every sub2video
 * stream of the same file supplied with frames so video filters that wait
 * on a subtitle input do not stall. 'pts' is in ist's stream time base. */
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue;
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* Only re-push the existing canvas if some filter actually failed
           to get a frame since the last push. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
312 
/* End-of-stream handling for a sub2video stream: terminate the currently
 * displayed subpicture (if any) and send EOF to every buffer source. */
static void sub2video_flush(InputStream *ist)
{
    int idx, err;

    if (ist->sub2video.end_pts < INT64_MAX)
        sub2video_update(ist, INT64_MAX, NULL);

    for (idx = 0; idx < ist->nb_filters; idx++) {
        /* NULL frame signals EOF to the buffer source. */
        err = av_buffersrc_add_frame(ist->filters[idx]->filter, NULL);
        if (err < 0 && err != AVERROR_EOF)
            av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
    }
}
326 
327 /* end of sub2video hack */
328 
/* Restore the terminal state saved by term_init(). Kept minimal so it is
 * safe to call from a signal handler (tcsetattr is async-signal-safe). */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if(restore_tty)
        tcsetattr (0, TCSANOW, &oldtty);
#endif
}
336 
/* Normal-path terminal restore. The empty av_log() call presumably
 * finalizes any in-progress status line before the tty is reset —
 * NOTE(review): confirm against av_log's line-buffering behavior. */
void term_exit(void)
{
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    term_exit_sigsafe();
}
342 
static volatile int received_sigterm = 0;    /* last termination signal number, 0 if none */
static volatile int received_nb_signals = 0; /* how many termination signals arrived */
static atomic_int transcode_init_done = ATOMIC_VAR_INIT(0);
static volatile int ffmpeg_exited = 0;       /* set at the end of ffmpeg_cleanup() */
static int main_return_code = 0;
static int64_t copy_ts_first_pts = AV_NOPTS_VALUE;
349 
350 static void
sigterm_handler(int sig)351 sigterm_handler(int sig)
352 {
353     int ret;
354     received_sigterm = sig;
355     received_nb_signals++;
356     term_exit_sigsafe();
357     if(received_nb_signals > 3) {
358         ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
359                     strlen("Received > 3 system signals, hard exiting\n"));
360         if (ret < 0) { /* Do nothing */ };
361         exit(123);
362     }
363 }
364 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: route console events through the same
 * sigterm_handler() path used for POSIX signals. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    switch (fdwCtrlType)
    {
    case CTRL_C_EVENT:
    case CTRL_BREAK_EVENT:
        sigterm_handler(SIGINT);
        return TRUE;

    case CTRL_CLOSE_EVENT:
    case CTRL_LOGOFF_EVENT:
    case CTRL_SHUTDOWN_EVENT:
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited) {
            Sleep(0);
        }
        return TRUE;

    default:
        av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
        return FALSE;
    }
}
#endif
396 
#ifdef __linux__
/* Install a handler via sigaction() so the mask/flags set up in term_init()
 * apply ('action' must be in scope at the expansion site); other platforms
 * fall back to plain signal(). */
#define SIGNAL(sig, func)               \
    do {                                \
        action.sa_handler = func;       \
        sigaction(sig, &action, NULL);  \
    } while (0)
#else
#define SIGNAL(sig, func) \
    signal(sig, func)
#endif
407 
/* Install termination-signal handlers and, when interacting with a real
 * terminal, switch it to non-canonical no-echo mode so read_key() can read
 * single keypresses. The previous tty state is saved for term_exit(). */
void term_init(void)
{
#if defined __linux__
    struct sigaction action = {0};
    action.sa_handler = sigterm_handler;

    /* block other interrupts while processing this one */
    sigfillset(&action.sa_mask);

    /* restart interruptible functions (i.e. don't fail with EINTR)  */
    action.sa_flags = SA_RESTART;
#endif

#if HAVE_TERMIOS_H
    if (!run_as_daemon && stdin_interaction) {
        struct termios tty;
        if (tcgetattr (0, &tty) == 0) {
            /* remember the original state so it can be restored on exit */
            oldtty = tty;
            restore_tty = 1;

            /* raw-ish mode: no input translation, no echo, no canonical
               line editing; deliver each byte as soon as it arrives */
            tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                             |INLCR|IGNCR|ICRNL|IXON);
            tty.c_oflag |= OPOST;
            tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
            tty.c_cflag &= ~(CSIZE|PARENB);
            tty.c_cflag |= CS8;
            tty.c_cc[VMIN] = 1;
            tty.c_cc[VTIME] = 0;

            tcsetattr (0, TCSANOW, &tty);
        }
        SIGNAL(SIGQUIT, sigterm_handler); /* Quit (POSIX).  */
    }
#endif

    SIGNAL(SIGINT , sigterm_handler); /* Interrupt (ANSI).    */
    SIGNAL(SIGTERM, sigterm_handler); /* Termination (ANSI).  */
#ifdef SIGXCPU
    SIGNAL(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
455 
/* read a key without blocking */
/* Returns the key byte if one is pending, otherwise -1 (or 0 on EOF of the
 * termios path). Three platform variants: select()+read on termios systems,
 * PeekNamedPipe/kbhit on Windows, and a stub returning -1 elsewhere. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    int n = 1;
    struct timeval tv;
    fd_set rfds;

    /* zero-timeout select: poll stdin without blocking */
    FD_ZERO(&rfds);
    FD_SET(0, &rfds);
    tv.tv_sec = 0;
    tv.tv_usec = 0;
    n = select(1, &rfds, NULL, NULL, &tv);
    if (n > 0) {
        n = read(0, &ch, 1);
        if (n == 1)
            return ch;

        return n;
    }
#elif HAVE_KBHIT
#    if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;
    /* lazily detect whether stdin is a console or a pipe (GUI parent) */
    if(!input_handle){
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        //Read it
        if(nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }else{
            return -1;
        }
    }
#    endif
    if(kbhit())
        return(getch());
#endif
    return -1;
}
507 
decode_interrupt_cb(void * ctx)508 static int decode_interrupt_cb(void *ctx)
509 {
510     return received_nb_signals > atomic_load(&transcode_init_done);
511 }
512 
513 const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
514 
/* Tear down all global state: filtergraphs, output files/streams, input
 * files/streams, the vstats file, and networking; then restore the terminal
 * and mark the process as exited. 'ret' is the pending exit status —
 * presumably this runs via the exit_program() machinery in cmdutils; it is
 * only used here to decide whether to log "Conversion failed!". */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free filtergraphs first: their inputs/outputs reference streams. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        avfilter_graph_free(&fg->graph);
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            /* Drain queued frames before freeing the fifo. */
            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                /* Drain queued subtitles likewise. */
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    av_fifo_generic_read(ist->sub2video.sub_queue,
                                         &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                av_fifo_freep(&ist->sub2video.sub_queue);
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    av_freep(&subtitle_out);

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        AVFormatContext *s;
        if (!of)
            continue;
        s = of->ctx;
        /* Only close the AVIO handle for muxers that actually own a file. */
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        avformat_free_context(s);
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        av_frame_free(&ost->filtered_frame);
        av_frame_free(&ost->last_frame);
        av_packet_free(&ost->pkt);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        av_expr_free(ost->forced_keyframes_pexpr);
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        av_freep(&ost->audio_channels_map);
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        avcodec_free_context(&ost->enc_ctx);
        avcodec_parameters_free(&ost->ref_par);

        if (ost->muxing_queue) {
            /* Drain packets still waiting for the muxer before freeing. */
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket *pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_free(&pkt);
            }
            av_fifo_freep(&ost->muxing_queue);
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    /* Stop input reader threads before closing the inputs they read from. */
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_packet_free(&input_files[i]->pkt);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        av_frame_free(&ist->decoded_frame);
        av_frame_free(&ist->filter_frame);
        av_packet_free(&ist->pkt);
        av_dict_free(&ist->decoder_opts);
        avsubtitle_free(&ist->prev_sub.subtitle);
        av_frame_free(&ist->sub2video.frame);
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        avcodec_free_context(&ist->dec_ctx);

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        /* fclose flushes; a failure here can mean lost statistics. */
        if (fclose(vstats_file))
            av_log(NULL, AV_LOG_ERROR,
                   "Error closing vstats file, loss of information possible: %s\n",
                   av_err2str(AVERROR(errno)));
    }
    av_freep(&vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    avformat_network_deinit();

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    /* Lets CtrlHandler() stop spinning on Windows. */
    ffmpeg_exited = 1;
}
675 
/* Delete from *a every key that appears in b (case-sensitive match).
 * Used to strip already-consumed options from a dictionary. */
void remove_avoptions(AVDictionary **a, AVDictionary *b)
{
    AVDictionaryEntry *e = NULL;

    while ((e = av_dict_get(b, "", e, AV_DICT_IGNORE_SUFFIX)))
        av_dict_set(a, e->key, NULL, AV_DICT_MATCH_CASE);
}
684 
/* Abort with a fatal error if any option in m was not consumed. */
void assert_avoptions(AVDictionary *m)
{
    AVDictionaryEntry *unused = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX);

    if (unused) {
        av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", unused->key);
        exit_program(1);
    }
}
693 
/* Exit after an experimental codec was requested without the required
 * opt-in; both parameters are currently unused (the caller presumably
 * logs the details before invoking this — confirm at call sites). */
static void abort_codec_experimental(const AVCodec *c, int encoder)
{
    exit_program(1);
}
698 
update_benchmark(const char * fmt,...)699 static void update_benchmark(const char *fmt, ...)
700 {
701     if (do_benchmark_all) {
702         BenchmarkTimeStamps t = get_benchmark_time_stamps();
703         va_list va;
704         char buf[1024];
705 
706         if (fmt) {
707             va_start(va, fmt);
708             vsnprintf(buf, sizeof(buf), fmt, va);
709             va_end(va);
710             av_log(NULL, AV_LOG_INFO,
711                    "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
712                    t.user_usec - current_time.user_usec,
713                    t.sys_usec - current_time.sys_usec,
714                    t.real_usec - current_time.real_usec, buf);
715         }
716         current_time = t;
717     }
718 }
719 
/* Mark every output stream finished: 'ost' itself gets 'this_stream',
 * all other streams get 'others'. */
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
{
    for (int i = 0; i < nb_output_streams; i++) {
        OutputStream *cur = output_streams[i];
        cur->finished |= (cur == ost) ? this_stream : others;
    }
}
728 
/* Hand one packet to the muxer of 'of' for stream 'ost'. Before the muxer
 * header is written, packets are buffered in ost->muxing_queue instead.
 * Also enforces -frames limits, fixes up invalid/non-monotonic timestamps,
 * and extracts video quality side data. 'unqueue' is nonzero when the
 * packet is being replayed from the muxing queue (already counted).
 * Consumes the packet in all paths. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket *tmp_pkt;
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* Grow the queue, but past the data-size threshold cap the
               element count at max_muxing_queue_size. */
            unsigned int are_we_over_size =
                (ost->muxing_queue_data_size + pkt->size) > ost->muxing_queue_data_threshold;
            int new_size = are_we_over_size ?
                           FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size) :
                           2 * av_fifo_size(ost->muxing_queue);

            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                av_log(NULL, AV_LOG_ERROR,
                       "Too many packets buffered for output stream %d:%d.\n",
                       ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        tmp_pkt = av_packet_alloc();
        if (!tmp_pkt)
            exit_program(1);
        /* Ownership of the data moves into the queued packet. */
        av_packet_move_ref(tmp_pkt, pkt);
        ost->muxing_queue_data_size += tmp_pkt->size;
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* In these sync modes the muxer is expected to generate timestamps. */
    if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && video_sync_method == VSYNC_DROP) ||
        (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
        pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* Pull encoder quality/error stats out of packet side data. */
        uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
                                              NULL);
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        /* Constant-frame-rate output: derive duration from the frame rate. */
        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* dts must never exceed pts; replace both by a guess when it does. */
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* Picks the median of {pts, dts, last_mux_dts + 1}:
               sum minus min minus max. */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* Enforce (strictly, unless AVFMT_TS_NONSTRICT) increasing dts. */
        if ((st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO || st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE) &&
            pkt->dts != AV_NOPTS_VALUE &&
            !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
            ost->last_mux_dts != AV_NOPTS_VALUE) {
            int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
            if (pkt->dts < max) {
                int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
                if (exit_on_error)
                    loglevel = AV_LOG_ERROR;
                av_log(s, loglevel, "Non-monotonous DTS in output stream "
                       "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                       ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
                if (exit_on_error) {
                    av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                    exit_program(1);
                }
                av_log(s, loglevel, "changing to %"PRId64". This may result "
                       "in incorrect timestamps in the output file.\n",
                       max);
                if (pkt->pts >= pkt->dts)
                    pkt->pts = FFMAX(pkt->pts, max);
                pkt->dts = max;
            }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
                "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
                av_get_media_type_string(ost->enc_ctx->codec_type),
                av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
                av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
                pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        /* A mux failure finishes every stream, but only this one's muxer. */
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        close_all_output_streams(ost, MUXER_FINISHED | ENCODER_FINISHED, ENCODER_FINISHED);
    }
    av_packet_unref(pkt);
}
874 
/* Mark this output stream's encoder as finished; with -shortest, also clip
 * the file's recording time to this stream's current end position. */
static void close_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished |= ENCODER_FINISHED;
    if (of->shortest) {
        int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts,
                                   ost->enc_ctx->time_base, AV_TIME_BASE_Q);
        if (end < of->recording_time)
            of->recording_time = end;
    }
}
885 
886 /*
887  * Send a single packet to the output, applying any bitstream filters
888  * associated with the output stream.  This may result in any number
889  * of packets actually being written, depending on what bitstream
890  * filters are applied.  The supplied packet is consumed and will be
891  * blank (as if newly-allocated) when this function returns.
892  *
893  * If eof is set, instead indicate EOF to all bitstream filters and
894  * therefore flush any delayed packets to the output.  A blank packet
895  * must be supplied in this case.
896  */
output_packet(OutputFile * of,AVPacket * pkt,OutputStream * ost,int eof)897 static void output_packet(OutputFile *of, AVPacket *pkt,
898                           OutputStream *ost, int eof)
899 {
900     int ret = 0;
901 
902     /* apply the output bitstream filters */
903     if (ost->bsf_ctx) {
904         ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
905         if (ret < 0)
906             goto finish;
907         while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
908             write_packet(of, pkt, ost, 0);
909         if (ret == AVERROR(EAGAIN))
910             ret = 0;
911     } else if (!eof)
912         write_packet(of, pkt, ost, 0);
913 
914 finish:
915     if (ret < 0 && ret != AVERROR_EOF) {
916         av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
917                "packet for stream #%d:%d.\n", ost->file_index, ost->index);
918         if(exit_on_error)
919             exit_program(1);
920     }
921 }
922 
/*
 * Check whether this output stream is still inside the -t recording
 * window.  Returns 1 if encoding should continue, 0 after closing the
 * stream because the limit was reached.
 */
static int check_recording_time(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    int limit_reached;

    limit_reached = of->recording_time != INT64_MAX &&
                    av_compare_ts(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base,
                                  of->recording_time, AV_TIME_BASE_Q) >= 0;
    if (!limit_reached)
        return 1;

    close_output_stream(ost);
    return 0;
}
935 
/*
 * Rescale frame->pts from the filtergraph's sink time base to the
 * encoder time base, shifted by the output file's -ss start time.
 *
 * Returns the same timestamp as a double with extra fractional
 * precision (used by the fps/vsync code); AV_NOPTS_VALUE when the
 * frame, its pts, the encoder context or the filter graph is missing.
 */
static double adjust_frame_pts_to_encoder_tb(OutputFile *of, OutputStream *ost,
                                             AVFrame *frame)
{
    double float_pts = AV_NOPTS_VALUE; // this is identical to frame.pts but with higher precision
    AVCodecContext *enc = ost->enc_ctx;
    if (!frame || frame->pts == AV_NOPTS_VALUE ||
        !enc || !ost->filter || !ost->filter->graph->graph)
        goto early_exit;

    {
        AVFilterContext *filter = ost->filter->filter;

        int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
        AVRational filter_tb = av_buffersink_get_time_base(filter);
        AVRational tb = enc->time_base;
        /* widen the denominator so the integer rescale keeps up to 16
         * extra fractional bits, recovered below by the division */
        int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

        tb.den <<= extra_bits;
        float_pts =
            av_rescale_q(frame->pts, filter_tb, tb) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
        float_pts /= 1 << extra_bits;
        // avoid exact midpoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
        float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

        frame->pts =
            av_rescale_q(frame->pts, filter_tb, enc->time_base) -
            av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
    }

early_exit:

    if (debug_ts) {
        /* enc may legitimately be NULL here (see the guard above), so it
         * must not be dereferenced when formatting the timestamp */
        av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
               frame ? av_ts2str(frame->pts) : "NULL",
               (enc && frame) ? av_ts2timestr(frame->pts, &enc->time_base) : "NULL",
               float_pts,
               enc ? enc->time_base.num : -1,
               enc ? enc->time_base.den : -1);
    }

    return float_pts;
}
979 
980 static int init_output_stream(OutputStream *ost, AVFrame *frame,
981                               char *error, int error_len);
982 
/*
 * Ensure an output stream is initialized, logging any failure.
 * Returns 0 when the stream is (or becomes) initialized, a negative
 * AVERROR code on failure; exits the program instead when fatal is set.
 */
static int init_output_stream_wrapper(OutputStream *ost, AVFrame *frame,
                                      unsigned int fatal)
{
    char errbuf[1024] = {0};
    int err;

    if (ost->initialized)
        return 0;

    err = init_output_stream(ost, frame, errbuf, sizeof(errbuf));
    if (err >= 0)
        return err;

    av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
           ost->file_index, ost->index, errbuf);

    if (fatal)
        exit_program(1);

    return err;
}
1003 
/*
 * Encode one audio frame and mux every packet the encoder produces.
 * The frame's pts is first rescaled to the encoder time base; packets
 * are rescaled to the muxer time base before being written.  Any
 * encoding error is fatal.
 */
static void do_audio_out(OutputFile *of, OutputStream *ost,
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket *pkt = ost->pkt;
    int ret;

    adjust_frame_pts_to_encoder_tb(of, ost, frame);

    /* stop once the -t recording limit has been reached */
    if (!check_recording_time(ost))
        return;

    /* without a usable pts (or with -async < 0), continue from the
     * running sample counter kept in sync_opts */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    update_benchmark(NULL);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* drain all packets the encoder has ready; EAGAIN means it wants
     * more input, anything else negative is an error */
    while (1) {
        av_packet_unref(pkt);
        ret = avcodec_receive_packet(enc, pkt);
        if (ret == AVERROR(EAGAIN))
            break;
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        /* convert packet timestamps from encoder to muxer time base */
        av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                   av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
        }

        output_packet(of, pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
1061 
/*
 * Encode one AVSubtitle and mux the result.  DVB subtitles are emitted
 * twice: a first packet that draws the rectangles and a second, empty
 * one that clears them.  Uses the file-level 'subtitle_out' scratch
 * buffer (allocated lazily, never freed here).  A missing pts is an
 * error; encoding failure is fatal.
 */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024;
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket *pkt = ost->pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared 1 MiB encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
        nb = 2;
    else
        nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        /* num_rects is zeroed for the DVB "clear" pass and restored after */
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts               += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time  -= sub->start_display_time;
        sub->start_display_time = 0;
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects;
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        /* pkt->data points into the shared buffer; output_packet()
         * consumes the packet before the next iteration reuses it */
        av_packet_unref(pkt);
        pkt->data = subtitle_out;
        pkt->size = subtitle_out_size;
        pkt->pts  = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt->duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt->pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt->dts = pkt->pts;
        output_packet(of, pkt, ost, 0);
    }
}
1144 
/*
 * Encode one video frame (or flush with next_picture == NULL),
 * applying the -vsync policy: depending on the drift between the
 * incoming frame's timestamp and the output frame counter, the frame
 * may be dropped, written once, or duplicated.  Forced keyframes
 * (-force_key_frames) are resolved here.  Encoding errors are fatal.
 */
static void do_video_out(OutputFile *of,
                         OutputStream *ost,
                         AVFrame *next_picture)
{
    int ret, format_video_sync;
    AVPacket *pkt = ost->pkt;
    AVCodecContext *enc = ost->enc_ctx;
    AVRational frame_rate;
    int nb_frames, nb0_frames, i;
    double delta, delta0;
    double duration = 0;
    double sync_ipts = AV_NOPTS_VALUE;
    int frame_size = 0;
    InputStream *ist = NULL;
    AVFilterContext *filter = ost->filter->filter;

    init_output_stream_wrapper(ost, next_picture, 1);
    sync_ipts = adjust_frame_pts_to_encoder_tb(of, ost, next_picture);

    if (ost->source_index >= 0)
        ist = input_streams[ost->source_index];

    /* expected frame duration in encoder time-base units, derived from
     * the filtergraph's frame rate when known */
    frame_rate = av_buffersink_get_frame_rate(filter);
    if (frame_rate.num > 0 && frame_rate.den > 0)
        duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));

    if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
        duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));

    /* with no filters involved, trust the demuxed packet duration instead */
    if (!ost->filters_script &&
        !ost->filters &&
        (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
        next_picture &&
        ist &&
        lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
        duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
    }

    if (!next_picture) {
        //end, flushing: repeat the median of the last duplication counts
        nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
                                          ost->last_nb0_frames[1],
                                          ost->last_nb0_frames[2]);
    } else {
        delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
        delta  = delta0 + duration;

        /* by default, we output a single frame */
        nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
        nb_frames = 1;

        /* resolve VSYNC_AUTO to a concrete sync mode based on the muxer */
        format_video_sync = video_sync_method;
        if (format_video_sync == VSYNC_AUTO) {
            if(!strcmp(of->ctx->oformat->name, "avi")) {
                format_video_sync = VSYNC_VFR;
            } else
                format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
            if (   ist
                && format_video_sync == VSYNC_CFR
                && input_files[ist->file_index]->ctx->nb_streams == 1
                && input_files[ist->file_index]->input_ts_offset == 0) {
                format_video_sync = VSYNC_VSCFR;
            }
            if (format_video_sync == VSYNC_CFR && copy_ts) {
                format_video_sync = VSYNC_VSCFR;
            }
        }
        ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);

        /* frame arrived slightly early but still overlaps the current
         * slot: clip it instead of dropping */
        if (delta0 < 0 &&
            delta > 0 &&
            format_video_sync != VSYNC_PASSTHROUGH &&
            format_video_sync != VSYNC_DROP) {
            if (delta0 < -0.6) {
                av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
            } else
                av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
            sync_ipts = ost->sync_opts;
            duration += delta0;
            delta0 = 0;
        }

        switch (format_video_sync) {
        case VSYNC_VSCFR:
            /* jump-start the counter at the first frame's timestamp
             * rather than padding up to it */
            if (ost->frame_number == 0 && delta0 >= 0.5) {
                av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
                delta = duration;
                delta0 = 0;
                ost->sync_opts = llrint(sync_ipts);
            }
            /* fallthrough */
        case VSYNC_CFR:
            // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
            if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
                nb_frames = 0;
            } else if (delta < -1.1)
                nb_frames = 0;
            else if (delta > 1.1) {
                nb_frames = lrintf(delta);
                if (delta0 > 1.1)
                    nb0_frames = llrintf(delta0 - 0.6);
            }
            break;
        case VSYNC_VFR:
            if (delta <= -0.6)
                nb_frames = 0;
            else if (delta > 0.6)
                ost->sync_opts = llrint(sync_ipts);
            break;
        case VSYNC_DROP:
        case VSYNC_PASSTHROUGH:
            ost->sync_opts = llrint(sync_ipts);
            break;
        default:
            av_assert0(0);
        }
    }

    /* honor -frames:v and keep the dup count within the emit count */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    nb0_frames = FFMIN(nb0_frames, nb_frames);

    /* shift the duplication history used by the flush heuristic above */
    memmove(ost->last_nb0_frames + 1,
            ost->last_nb0_frames,
            sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
    ost->last_nb0_frames[0] = nb0_frames;

    if (nb0_frames == 0 && ost->last_dropped) {
        nb_frames_drop++;
        av_log(NULL, AV_LOG_VERBOSE,
               "*** dropping frame %d from stream %d at ts %"PRId64"\n",
               ost->frame_number, ost->st->index, ost->last_frame->pts);
    }
    if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
        if (nb_frames > dts_error_threshold * 30) {
            av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
            nb_frames_drop++;
            return;
        }
        nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
        av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        if (nb_frames_dup > dup_warning) {
            av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
            dup_warning *= 10;
        }
    }
    ost->last_dropped = nb_frames == nb0_frames && next_picture;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVFrame *in_picture;
        int forced_keyframe = 0;
        double pts_time;

        /* the first nb0_frames iterations re-encode the previous frame */
        if (i < nb0_frames && ost->last_frame) {
            in_picture = ost->last_frame;
        } else
            in_picture = next_picture;

        if (!in_picture)
            return;

        in_picture->pts = ost->sync_opts;

        if (!check_recording_time(ost))
            return;

        in_picture->quality = enc->global_quality;
        in_picture->pict_type = 0;

        if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
            in_picture->pts != AV_NOPTS_VALUE)
            ost->forced_kf_ref_pts = in_picture->pts;

        /* decide whether this frame must be forced to a keyframe:
         * explicit -force_key_frames timestamps, an expression, or
         * "source" (copy the input's keyframe flags) */
        pts_time = in_picture->pts != AV_NOPTS_VALUE ?
            (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
        if (ost->forced_kf_index < ost->forced_kf_count &&
            in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
            ost->forced_kf_index++;
            forced_keyframe = 1;
        } else if (ost->forced_keyframes_pexpr) {
            double res;
            ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
            res = av_expr_eval(ost->forced_keyframes_pexpr,
                               ost->forced_keyframes_expr_const_values, NULL);
            ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
                    ost->forced_keyframes_expr_const_values[FKF_N],
                    ost->forced_keyframes_expr_const_values[FKF_N_FORCED],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N],
                    ost->forced_keyframes_expr_const_values[FKF_T],
                    ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T],
                    res);
            if (res) {
                forced_keyframe = 1;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] =
                    ost->forced_keyframes_expr_const_values[FKF_N];
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] =
                    ost->forced_keyframes_expr_const_values[FKF_T];
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] += 1;
            }

            ost->forced_keyframes_expr_const_values[FKF_N] += 1;
        } else if (   ost->forced_keyframes
                   && !strncmp(ost->forced_keyframes, "source", 6)
                   && in_picture->key_frame==1
                   && !i) {
            forced_keyframe = 1;
        }

        if (forced_keyframe) {
            in_picture->pict_type = AV_PICTURE_TYPE_I;
            av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
        }

        update_benchmark(NULL);
        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
                   "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
                   av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
                   enc->time_base.num, enc->time_base.den);
        }

        ost->frames_encoded++;

        ret = avcodec_send_frame(enc, in_picture);
        if (ret < 0)
            goto error;
        // Make sure Closed Captions will not be duplicated
        av_frame_remove_side_data(in_picture, AV_FRAME_DATA_A53_CC);

        /* drain all packets the encoder has ready */
        while (1) {
            av_packet_unref(pkt);
            ret = avcodec_receive_packet(enc, pkt);
            update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
            if (ret == AVERROR(EAGAIN))
                break;
            if (ret < 0)
                goto error;

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                       "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                       av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &enc->time_base),
                       av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &enc->time_base));
            }

            if (pkt->pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
                pkt->pts = ost->sync_opts;

            av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);

            if (debug_ts) {
                av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
                    "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                    av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->mux_timebase),
                    av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->mux_timebase));
            }

            frame_size = pkt->size;
            output_packet(of, pkt, ost, 0);

            /* if two pass, output log */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;

        if (vstats_filename && frame_size)
            do_video_stats(ost, frame_size);
    }

    /* remember the frame for possible duplication on the next call */
    if (!ost->last_frame)
        ost->last_frame = av_frame_alloc();
    av_frame_unref(ost->last_frame);
    if (next_picture && ost->last_frame)
        av_frame_ref(ost->last_frame, next_picture);
    else
        av_frame_free(&ost->last_frame);

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
    exit_program(1);
}
1434 
psnr(double d)1435 static double psnr(double d)
1436 {
1437     return -10.0 * log10(d);
1438 }
1439 
/*
 * Append one line of per-frame statistics (-vstats / -vstats_file) for
 * a video stream: frame number, quantizer, optional PSNR, frame size,
 * elapsed time, instantaneous and average bitrate, and picture type.
 * Opens the stats file lazily on first use; failure to open is fatal.
 */
static void do_video_stats(OutputStream *ost, int frame_size)
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* version 2 of the format additionally records file/stream indices */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else  {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        /* clamp to avoid division by (near) zero in the average below */
        if (ti1 < 0.01)
            ti1 = 0.01;

        bitrate     = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1482 
/*
 * Mark a stream completely finished (encoder and muxer).  Under
 * -shortest, finishing one stream finishes every stream of the file.
 */
static void finish_output_stream(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];

    ost->finished = ENCODER_FINISHED | MUXER_FINISHED;

    if (of->shortest) {
        int i;

        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1495 
/**
 * Get and encode new output from any of the filtergraphs, without causing
 * activity.
 *
 * @return  0 for success, <0 for severe errors
 */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile    *of = output_files[ost->file_index];
        AVFilterContext *filter;
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* skip streams whose filtergraph is not configured yet */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /*
         * Unlike video, with audio the audio frame size matters.
         * Currently we are fully reliant on the lavfi filter chain to
         * do the buffering deed for us, and thus the frame size parameter
         * needs to be set accordingly. Where does one get the required
         * frame size? From the initialized AVCodecContext of an audio
         * encoder. Thus, if we have gotten to an audio stream, initialize
         * the encoder earlier than receiving the first AVFrame.
         */
        if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        /* lazily allocate the per-stream packet and frame buffers */
        if (!ost->pkt && !(ost->pkt = av_packet_alloc())) {
            return AVERROR(ENOMEM);
        }
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            /* NO_REQUEST: only take frames the graph already produced,
             * never trigger new filtering activity */
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
                                               AV_BUFFERSINK_FLAG_NO_REQUEST);
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    av_log(NULL, AV_LOG_WARNING,
                           "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* flush the video encoder on end of stream */
                    if (av_buffersink_get_type(filter) == AVMEDIA_TYPE_VIDEO)
                        do_video_out(of, ost, NULL);
                }
                break;
            }
            /* discard frames for streams that already finished */
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                do_video_out(of, ost, filtered_frame);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1584 
/**
 * Print the end-of-run summary: per-media-type output byte totals and
 * muxing overhead at INFO level, then (at VERBOSE level) per-stream
 * demux/decode statistics for every input file and encode/mux statistics
 * for every output file.
 *
 * @param total_size total size of the muxed output in bytes as reported
 *                   by the muxer; <= 0 when unknown
 */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;  /* muxing overhead in %; -1 means unknown */
    int i, j;
    int pass1_used = 1;    /* stays 1 only if every stream ran exactly pass 1 */

    /* Accumulate payload bytes per media type, plus global-header bytes. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default:                 other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size  += ost->data_size;
        /* Any stream whose pass flags are not exactly PASS1 clears this. */
        if (   (ost->enc_ctx->flags & (AV_CODEC_FLAG_PASS1 | AV_CODEC_FLAG_PASS2))
            != AV_CODEC_FLAG_PASS1)
            pass1_used = 0;
    }

    /* Overhead = container bytes beyond the raw stream payload. */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        /* Per-file totals; deliberately shadows the function parameter. */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size    += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, "  Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* Per-file totals; deliberately shadows the function parameter. */
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size    += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, "  Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, "  Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    /* Warn when nothing was written at all. A pure pass-1 run legitimately
     * produces an empty output, so only suggest checking trim options when
     * that is not the case. */
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1701 
/**
 * Print the periodic one-line status report (frame count, fps, q, size,
 * time, bitrate, speed) to stderr/log and, when -progress is active, emit
 * the machine-readable key=value block to progress_avio.
 *
 * Uses static state (last_time, first_report, qp_histogram), so it is not
 * reentrant.
 *
 * @param is_last_report non-zero for the final report at end of conversion;
 *                       forces printing, ends the progress stream and calls
 *                       print_final_stats()
 * @param timer_start    transcode start time in microseconds (for fps/speed)
 * @param cur_time       current wallclock time in microseconds
 */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;   /* max end pts seen over all output streams */
    static int64_t last_time = -1; /* time of the previous report */
    static int first_report = 1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* Rate-limit intermediate reports to one per stats_period; the first
     * report additionally waits until every output header was written. */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
        }
        if (((cur_time - last_time) < stats_period && !first_report) ||
            (first_report && nb_output_dumped < nb_output_files))
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;  /* elapsed seconds */


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    av_bprint_init(&buf, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* Secondary video streams only report their quantizer. */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        /* The first video stream drives frame/fps/qp-hist/PSNR reporting. */
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* One hex digit per QP bucket: log2 of the hit count. */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* Cumulative error over all frames so far. */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        /* Error of the last frame only. */
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    if (j)
                        scale /= 4;  /* chroma planes are quarter size */
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        if (av_stream_get_end_pts(ost->st) != AV_NOPTS_VALUE) {
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
            if (copy_ts) {
                if (copy_ts_first_pts == AV_NOPTS_VALUE && pts > 1)
                    copy_ts_first_pts = pts;
                if (copy_ts_first_pts != AV_NOPTS_VALUE)
                    pts -= copy_ts_first_pts;
            }
        }

        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* Split pts (microseconds) into sign, H:M:S and sub-second remainder. */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    /* Negative values mean "not computable"; printed as N/A below. */
    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else                av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else                av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    if (nb_frames_dup || nb_frames_drop)
        av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        /* Intermediate reports overwrite the line (\r); the last gets \n. */
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s    %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s    %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                av_log(NULL, AV_LOG_ERROR,
                       "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    first_report = 0;

    if (is_last_report)
        print_final_stats(total_size);
}
1911 
/* Seed an input filter's parameters straight from the stream's codec
 * parameters. Used when no frame was ever decoded, so the values come
 * from libavformat instead of actual decoder output. */
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
{
    /* common */
    ifilter->format = par->format;

    /* video */
    ifilter->width               = par->width;
    ifilter->height              = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;

    /* audio */
    ifilter->sample_rate    = par->sample_rate;
    ifilter->channels       = par->channels;
    ifilter->channel_layout = par->channel_layout;
}
1924 
/**
 * Drain all encoders at end of stream: send a NULL frame to each encoder
 * that was actually used and mux every packet it still produces. Streams
 * that never received any data are force-initialized first so a valid
 * (if empty) output stream is still written.
 */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream   *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile      *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;

            av_log(NULL, AV_LOG_WARNING,
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* Fill in missing input formats from the stream parameters
                 * so the filtergraph can be configured without any frames. */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                if (!ifilter_has_all_input_formats(fg))
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            init_output_stream_wrapper(ost, NULL, 1);
        }

        /* Only audio and video encoders need draining. */
        if (enc->codec_type != AVMEDIA_TYPE_VIDEO && enc->codec_type != AVMEDIA_TYPE_AUDIO)
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket *pkt = ost->pkt;
            int pkt_size;

            if (!pkt)
                break;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc   = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc   = "video";
                break;
            default:
                av_assert0(0);
            }

            update_benchmark(NULL);

            av_packet_unref(pkt);
            /* EAGAIN means the encoder still wants input: signal EOF by
             * sending a NULL frame, then retry receiving. */
            while ((ret = avcodec_receive_packet(enc, pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* Append two-pass log data produced during the flush. */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                /* Encoder fully drained: flush the muxer for this stream. */
                output_packet(of, pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(pkt);
                continue;
            }
            av_packet_rescale_ts(pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt->size;
            output_packet(of, pkt, ost, 0);
            if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO && vstats_filename) {
                do_video_stats(ost, pkt_size);
            }
        }
    }
}
2031 
2032 /*
2033  * Check whether a packet from ist should be written into ost at this time
2034  */
check_output_constraints(InputStream * ist,OutputStream * ost)2035 static int check_output_constraints(InputStream *ist, OutputStream *ost)
2036 {
2037     OutputFile *of = output_files[ost->file_index];
2038     int ist_index  = input_files[ist->file_index]->ist_index + ist->st->index;
2039 
2040     if (ost->source_index != ist_index)
2041         return 0;
2042 
2043     if (ost->finished)
2044         return 0;
2045 
2046     if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
2047         return 0;
2048 
2049     return 1;
2050 }
2051 
/**
 * Stream-copy one input packet to the output: apply start-time and
 * recording-time trimming, then rescale the timestamps into the muxer
 * timebase and hand the packet to the muxer.
 *
 * @param ist source input stream
 * @param ost destination output stream (stream copy)
 * @param pkt packet to copy; NULL signals EOF and flushes the output
 *            bitstream filters
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile   *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket *opkt = ost->pkt;

    av_packet_unref(opkt);
    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, opkt, ost, 1);
        return;
    }

    /* Drop leading non-keyframes unless explicitly allowed. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    /* Drop packets before the requested output start time. */
    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* Close the stream once the output recording time limit is reached. */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* Same for a per-input-file recording limit. */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (av_packet_ref(opkt, pkt) < 0)
        exit_program(1);

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt->pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;

    if (pkt->dts == AV_NOPTS_VALUE) {
        /* No DTS in the packet: fall back to the demuxer-maintained DTS. */
        opkt->dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
        int duration = av_get_audio_frame_duration(ist->dec_ctx, pkt->size);
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        /* Rescale via the sample rate to avoid rounding drift on audio. */
        opkt->dts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                    (AVRational){1, ist->dec_ctx->sample_rate}, duration,
                                    &ist->filter_in_rescale_delta_last, ost->mux_timebase);
        /* dts will be set immediately afterwards to what pts is now */
        opkt->pts = opkt->dts - ost_tb_start_time;
    } else
        opkt->dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt->dts -= ost_tb_start_time;

    opkt->duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    output_packet(of, opkt, ost, 0);
}
2126 
/* Fill in a missing decoder channel layout from the channel count, up to
 * ist->guess_layout_max channels. Returns 1 when a layout is available
 * (already set or successfully guessed), 0 otherwise. */
int guess_input_channel_layout(InputStream *ist)
{
    AVCodecContext *dec = ist->dec_ctx;
    char name[256];

    /* Nothing to do if the decoder already knows the layout. */
    if (dec->channel_layout)
        return 1;

    if (dec->channels > ist->guess_layout_max)
        return 0;

    dec->channel_layout = av_get_default_channel_layout(dec->channels);
    if (!dec->channel_layout)
        return 0;

    av_get_channel_layout_string(name, sizeof(name), dec->channels,
                                 dec->channel_layout);
    av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
           "#%d.%d : %s\n", ist->file_index, ist->st->index, name);
    return 1;
}
2146 
/* Record decode success/failure statistics, honour -xerror, and warn (or
 * abort with -xerror) when the decoder flagged the frame as corrupt. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    const int failed = ret < 0;

    /* Tally outcomes for the end-of-run error statistics. */
    if (*got_output || failed)
        decode_error_stat[failed]++;

    if (failed && exit_on_error)
        exit_program(1);

    if (!*got_output || !ist)
        return;

    if (ist->decoded_frame->decode_error_flags ||
        (ist->decoded_frame->flags & AV_FRAME_FLAG_CORRUPT)) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt decoded frame in stream %d\n",
               input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
    }
}
2164 
2165 // Filters can be configured only if the formats of all inputs are known.
ifilter_has_all_input_formats(FilterGraph * fg)2166 static int ifilter_has_all_input_formats(FilterGraph *fg)
2167 {
2168     int i;
2169     for (i = 0; i < fg->nb_inputs; i++) {
2170         if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2171                                           fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2172             return 0;
2173     }
2174     return 1;
2175 }
2176 
/**
 * Feed one decoded frame into an input of a filtergraph, (re)configuring
 * the graph first when the frame's parameters differ from what the graph
 * was built for. If other graph inputs do not know their formats yet, the
 * frame is buffered in this input's FIFO instead.
 *
 * On success the frame is consumed (its references are taken over).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate    != frame->sample_rate ||
                       ifilter->channels       != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width  != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* A change of hardware frames context always forces a reinit. */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        /* The graph can only be configured once every audio/video input
         * knows its format; until then, queue a clone of the frame.
         * (This check is graph-wide, so no per-input loop is needed.) */
        if (!ifilter_has_all_input_formats(fg)) {
            AVFrame *tmp = av_frame_clone(frame);
            if (!tmp)
                return AVERROR(ENOMEM);
            av_frame_unref(frame);

            /* Grow the FIFO when it is full. */
            if (!av_fifo_space(ifilter->frame_queue)) {
                ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                if (ret < 0) {
                    av_frame_free(&tmp);
                    return ret;
                }
            }
            av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
            return 0;
        }

        /* Drain whatever the old graph still holds before replacing it. */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    ret = av_buffersrc_add_frame_flags(ifilter->filter, frame, AV_BUFFERSRC_FLAG_PUSH);
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2253 
/* Mark a filtergraph input as finished. For a configured graph this closes
 * the buffer source at the given pts; otherwise the input's parameters are
 * backfilled from the stream so the graph can still be configured later. */
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
{
    ifilter->eof = 1;

    if (ifilter->filter) {
        int err = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
        return err < 0 ? err : 0;
    }

    // the filtergraph was never configured
    if (ifilter->format < 0)
        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);

    /* Audio/video inputs must end up with a known format. */
    if (ifilter->format < 0 &&
        (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
        av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
2276 
2277 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2278 // There is the following difference: if you got a frame, you must call
2279 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2280 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
decode(AVCodecContext * avctx,AVFrame * frame,int * got_frame,AVPacket * pkt)2281 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2282 {
2283     int ret;
2284 
2285     *got_frame = 0;
2286 
2287     if (pkt) {
2288         ret = avcodec_send_packet(avctx, pkt);
2289         // In particular, we don't expect AVERROR(EAGAIN), because we read all
2290         // decoded frames with avcodec_receive_frame() until done.
2291         if (ret < 0 && ret != AVERROR_EOF)
2292             return ret;
2293     }
2294 
2295     ret = avcodec_receive_frame(avctx, frame);
2296     if (ret < 0 && ret != AVERROR(EAGAIN))
2297         return ret;
2298     if (ret >= 0)
2299         *got_frame = 1;
2300 
2301     return 0;
2302 }
2303 
/* Push a decoded frame into every filtergraph fed by this input stream.
 * The last filter consumes the frame itself; every earlier one receives
 * a new reference so the underlying data is shared. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int idx, err;

    av_assert1(ist->nb_filters > 0); /* ensure err is initialized */
    for (idx = 0; idx < ist->nb_filters; idx++) {
        int is_last = idx == ist->nb_filters - 1;
        AVFrame *src = decoded_frame;

        if (!is_last) {
            src = ist->filter_frame;
            err = av_frame_ref(src, decoded_frame);
            if (err < 0)
                break;
        }

        err = ifilter_send_frame(ist->filters[idx], src);
        if (err == AVERROR_EOF)
            err = 0; /* ignore */
        if (err < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(err));
            break;
        }
    }
    return err;
}
2329 
/**
 * Decode one audio packet from ist, derive a pts for the decoded frame
 * and feed the frame into the stream's filtergraphs.
 *
 * @param ist           input stream the packet belongs to
 * @param pkt           packet to decode, or NULL to drain the decoder
 * @param got_output    set to 1 if a frame was produced, 0 otherwise
 * @param decode_failed set to 1 if the decoder itself returned an error
 * @return 0 or a negative decoder/filter error code
 */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* Lazily allocate the reusable frame buffers. */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    /* EOF while draining is expected and not counted as an error. */
    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    /* Pick the best available timestamp and remember its timebase:
     * frame pts, then packet pts, then the demuxer-estimated dts. */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2391 
/**
 * Decode one video packet (or drain the decoder when pkt is NULL / eof set)
 * and forward any produced frame to the stream's filtergraphs.
 *
 * @param ist           input stream owning the decoder context
 * @param pkt           packet to decode, or NULL while draining
 * @param got_output    set non-zero when a frame was produced
 * @param duration_pts  receives the decoded frame's pkt_duration (stream time base)
 * @param eof           non-zero when flushing the decoder at end of input
 * @param decode_failed set to 1 when the error came from the decoder itself
 *                      (as opposed to downstream frame processing)
 * @return 0 or a negative AVERROR; errors from send_frame_to_filters() take
 *         precedence over the decode return value
 */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    /* frames are allocated lazily on first use and reused across calls */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        pkt->dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* queue the dts so frames emitted during draining can still get a
         * timestamp (consumed below when best_effort_timestamp is missing) */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to https://streams.videolan.org/upload/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* purely diagnostic: report when the decoder context and the produced
     * frame disagree on geometry/pixel format */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    /* honour the user's -top override, if given */
    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download the frame from hw surface memory when a retrieve callback
     * is installed and the frame is still in the hwaccel pixel format */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    /* with a forced input framerate (-r before -i) timestamps are simply
     * sequential frame indices */
    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* while draining, fall back to the dts values queued above */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        /* note: also assigns best_effort_timestamp to decoded_frame->pts */
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2522 
/**
 * Decode one subtitle packet and dispatch the result: render it into the
 * sub2video frame, queue it for sub2video filtering, and/or re-encode it to
 * every subtitle output stream fed by this input.
 *
 * With -fix_sub_duration the previously decoded subtitle is held back one
 * call (via the FFSWAP juggling below) so its display duration can be capped
 * by the next subtitle's pts; callers therefore see output delayed by one
 * subtitle in that mode.
 *
 * @param ist           input stream owning the subtitle decoder
 * @param pkt           packet to decode (an empty packet flushes sub2video)
 * @param got_output    set non-zero when a subtitle was emitted downstream
 * @param decode_failed set to 1 on decode error or when nothing was decoded
 * @return the decoder's (possibly swapped) return value
 */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        /* a zero-sized packet signals EOF: flush pending sub2video state */
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* elapsed time since the previous subtitle, in ms (the unit of
             * end_display_time) */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* emit the *previous* subtitle now and stash the current one; this
         * delays output by one subtitle so durations can be fixed up */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, INT64_MIN, &subtitle);
    } else if (ist->nb_filters) {
        /* no sub2video frame yet: queue the subtitle until the filtergraph
         * is configured; ownership moves to the fifo (free_sub = 0) */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0;
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    /* re-encode to every subtitle output fed by this input stream */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2601 
/**
 * Signal end-of-stream to every filtergraph input fed by this stream,
 * passing the stream's current pts (converted back to stream time base)
 * so the filters can timestamp trailing output.
 *
 * @return 0 on success, or the first negative error from ifilter_send_eof()
 */
static int send_filter_eof(InputStream *ist)
{
    /* TODO keep pts also in stream time base to avoid converting back */
    const int64_t eof_pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q,
                                             ist->st->time_base,
                                             AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
    int idx;

    for (idx = 0; idx < ist->nb_filters; idx++) {
        int err = ifilter_send_eof(ist->filters[idx], eof_pts);
        if (err < 0)
            return err;
    }

    return 0;
}
2616 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/**
 * Central per-packet driver: maintain the stream's dts/pts estimates, decode
 * the packet (looping while the decoder keeps producing frames during drain),
 * propagate EOF to the filtergraphs, and perform stream copy for outputs that
 * do not re-encode.
 *
 * @param ist    input stream the packet belongs to
 * @param pkt    demuxed packet, or NULL to flush the decoder
 * @param no_eof when non-zero (stream looping), flush without sending EOF
 *               to the filters
 * @return 0 when EOF was reached on this stream, non-zero otherwise
 */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket *avpkt;

    if (!ist->pkt && !(ist->pkt = av_packet_alloc()))
        return AVERROR(ENOMEM);
    avpkt = ist->pkt;

    /* initialize the running dts/pts estimates from the first packet;
     * dts starts negative by the decoder delay for stream copy */
    if (!ist->saw_first_ts) {
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    /* keep a private reference so the caller's packet stays untouched */
    if (pkt) {
        av_packet_unref(avpkt);
        ret = av_packet_ref(avpkt, pkt);
        if (ret < 0)
            return ret;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            /* on repeat iterations the packet was already consumed: pass NULL */
            ret = decode_audio    (ist, repeating ? NULL : avpkt, &got_output,
                                   &decode_failed);
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    /* estimate the duration from the codec framerate and the
                     * parser's repeat_pict field */
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                /* prefer the decoded frame's own duration; fall back to the
                 * dts-derived estimate */
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            av_packet_unref(avpkt);
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            av_packet_unref(avpkt);
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                /* snap to the forced framerate grid: round to the nearest
                 * frame index, advance one frame, convert back */
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    /* copy the packet to every output stream that does not re-encode */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost->pkt && !(ost->pkt = av_packet_alloc()))
            exit_program(1);
        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2807 
print_sdp(void)2808 static void print_sdp(void)
2809 {
2810     char sdp[16384];
2811     int i;
2812     int j;
2813     AVIOContext *sdp_pb;
2814     AVFormatContext **avc;
2815 
2816     for (i = 0; i < nb_output_files; i++) {
2817         if (!output_files[i]->header_written)
2818             return;
2819     }
2820 
2821     avc = av_malloc_array(nb_output_files, sizeof(*avc));
2822     if (!avc)
2823         exit_program(1);
2824     for (i = 0, j = 0; i < nb_output_files; i++) {
2825         if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
2826             avc[j] = output_files[i]->ctx;
2827             j++;
2828         }
2829     }
2830 
2831     if (!j)
2832         goto fail;
2833 
2834     av_sdp_create(avc, j, sdp, sizeof(sdp));
2835 
2836     if (!sdp_filename) {
2837         printf("SDP:\n%s\n", sdp);
2838         fflush(stdout);
2839     } else {
2840         if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
2841             av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
2842         } else {
2843             avio_print(sdp_pb, sdp);
2844             avio_closep(&sdp_pb);
2845             av_freep(&sdp_filename);
2846         }
2847     }
2848 
2849 fail:
2850     av_freep(&avc);
2851 }
2852 
/**
 * AVCodecContext.get_format callback: pick the pixel format to decode into,
 * initializing a hardware acceleration method when one matches the user's
 * -hwaccel request.
 *
 * Iterates the decoder's offered formats in order; the first software format
 * terminates the scan (decoders list hw formats before sw ones), otherwise
 * each hwaccel format is matched against either the generic hw-device-ctx
 * configs or the legacy hwaccels[] table.
 *
 * @param s        decoder context (s->opaque is the owning InputStream)
 * @param pix_fmts NONE-terminated list of candidate formats from the decoder
 * @return the chosen pixel format, or AV_PIX_FMT_NONE on fatal hwaccel error
 */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* first non-hwaccel format: accept it (software decoding) */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* look for a decoder hw config that supports this format via a
             * hardware device context */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                /* with an explicitly requested hwaccel, failure is fatal;
                 * with HWACCEL_AUTO we keep trying other formats */
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* fall back to the legacy hwaccels[] table */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2938 
/**
 * AVCodecContext.get_buffer2 callback: route frame allocation to the
 * active hwaccel's buffer allocator when the frame uses the hwaccel pixel
 * format, otherwise defer to libavcodec's default allocator.
 */
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;
    const int want_hw_buffer = ist->hwaccel_get_buffer &&
                               frame->format == ist->hwaccel_pix_fmt;

    return want_hw_buffer ? ist->hwaccel_get_buffer(s, frame, flags)
                          : avcodec_default_get_buffer2(s, frame, flags);
}
2948 
/**
 * Open the decoder for one input stream (when decoding is needed) and reset
 * its pts/dts estimates.
 *
 * @param ist_index index into the global input_streams[] array
 * @param error     buffer receiving a human-readable message on failure
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR on failure
 */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        const AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        /* install our callbacks for hwaccel format negotiation and frame
         * buffer allocation */
        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
#if LIBAVCODEC_VERSION_MAJOR < 60
        ist->dec_ctx->thread_safe_callbacks = 1;
#endif

        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        /* any options left over were not consumed by the decoder */
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
3014 
get_input_stream(OutputStream * ost)3015 static InputStream *get_input_stream(OutputStream *ost)
3016 {
3017     if (ost->source_index >= 0)
3018         return input_streams[ost->source_index];
3019     return NULL;
3020 }
3021 
/* qsort() comparator for int64_t values; the (a > b) - (a < b) form yields
 * -1/0/1 without the overflow risk of subtracting the operands. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
3026 
/* open the muxer when all the streams are initialized */
/**
 * If every stream of this output file is initialized, write the muxer header,
 * optionally print the SDP, and flush packets that were queued while the
 * muxer was not yet open.
 *
 * @param of         the output file to (maybe) open
 * @param file_index index of the file, used for logging/dumping
 * @return 0 on success (including "not ready yet"), negative AVERROR on error
 */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    /* header can only be written once every stream's parameters are known */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);
    nb_output_dumped++;

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket *pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            ost->muxing_queue_data_size -= pkt->size;
            write_packet(of, pkt, ost, 1);
            av_packet_free(&pkt);
        }
    }

    return 0;
}
3076 
/**
 * Initialize the output stream's bitstream filter (if any): feed it the
 * current codec parameters and time base, then propagate the filter's
 * output parameters and time base back to the stream.
 *
 * @return 0 on success (or when no bsf is configured), negative AVERROR on error
 */
static int init_output_bsfs(OutputStream *ost)
{
    AVBSFContext *ctx = ost->bsf_ctx;
    int ret;

    if (!ctx)
        return 0;

    ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
    if (ret < 0)
        return ret;

    ctx->time_base_in = ost->st->time_base;

    ret = av_bsf_init(ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
               ctx->filter->name);
        return ret;
    }

    /* the bsf may rewrite extradata/codec parameters and the time base */
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;
    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3105 
/**
 * Initialize an output stream in stream-copy mode: transfer codec
 * parameters, timing information, side data and disposition from the source
 * input stream, applying any user overrides (-tag, -aspect, -r).
 *
 * @return 0 on success, a negative AVERROR on failure
 */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    /* non-zero only when the user forced a codec tag on the command line */
    uint32_t codec_tag = par_dst->codec_tag;

    av_assert0(ist && !ost->filter);

    /* route the source parameters through enc_ctx so codec-level options
     * given by the user can still apply */
    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    if (!codec_tag) {
        /* keep the source tag only when the output format either has no tag
         * table, maps the tag back to the same codec id, or has no tag of
         * its own for this codec */
        unsigned int codec_tag_tmp;
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    if (ost->rotate_overridden) {
        /* user-specified rotation replaces the copied display matrix */
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        /* these block_align values are bogus/meaningless for MP3 and AC3;
         * clear them so muxers do not rely on them */
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
            }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3219 
/* Set an "encoder" metadata tag on the output stream, unless the user
 * already supplied one. Normally the tag is the full LIBAVCODEC_IDENT plus
 * the encoder name; when bitexact output was requested (via format fflags
 * or codec flags) a version-free "Lavc" prefix is used instead. */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *entry;
    const char *prefix;
    char *tag;
    int tag_len;
    int fmt_flags = 0;
    int enc_flags = ost->enc_ctx->flags;

    /* respect an encoder tag set explicitly by the user */
    if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
        return;

    /* fold user-specified fflags into fmt_flags */
    entry = av_dict_get(of->opts, "fflags", NULL, 0);
    if (entry) {
        const AVOption *opt = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!opt)
            return;
        av_opt_eval_flags(of->ctx, opt, entry->value, &fmt_flags);
    }
    /* fold user-specified codec flags into enc_flags */
    entry = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (entry) {
        const AVOption *opt = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!opt)
            return;
        av_opt_eval_flags(ost->enc_ctx, opt, entry->value, &enc_flags);
    }

    prefix = (!(fmt_flags & AVFMT_FLAG_BITEXACT) &&
              !(enc_flags & AV_CODEC_FLAG_BITEXACT)) ? LIBAVCODEC_IDENT " "
                                                     : "Lavc ";

    tag_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    tag     = av_mallocz(tag_len);
    if (!tag)
        exit_program(1);

    snprintf(tag, tag_len, "%s%s", prefix, ost->enc->name);
    /* the dictionary takes ownership of tag (AV_DICT_DONT_STRDUP_VAL) */
    av_dict_set(&ost->st->metadata, "encoder", tag,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
3260 
/**
 * Parse a comma-separated list of forced key frame times and store them,
 * sorted ascending, in ost->forced_kf_pts / ost->forced_kf_count.
 *
 * Each list entry is either an absolute time (parsed with
 * parse_time_or_die()) or the literal "chapters[+offset]", which expands
 * to the chapter start times of the output file, optionally shifted by
 * the given offset. All timestamps are rescaled to avctx->time_base.
 *
 * Note: kf is modified in place (',' separators are overwritten with NUL).
 * Exits the program on allocation failure or on an invalid time string.
 */
static void parse_forced_key_frames(char *kf, OutputStream *ost,
                                    AVCodecContext *avctx)
{
    char *p;
    int n = 1, i, size, index = 0;
    int64_t t, *pts;

    /* number of entries = number of commas + 1 */
    for (p = kf; *p; p++)
        if (*p == ',')
            n++;
    size = n;
    pts = av_malloc_array(size, sizeof(*pts));
    if (!pts) {
        av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
        exit_program(1);
    }

    p = kf;
    for (i = 0; i < n; i++) {
        char *next = strchr(p, ',');

        if (next)
            *next++ = 0;

        /* strncmp, not memcmp: a token shorter than 8 bytes must not be
         * read past its terminating NUL (memcmp would compare all 8 bytes
         * unconditionally, an out-of-bounds read) */
        if (!strncmp(p, "chapters", 8)) {

            AVFormatContext *avf = output_files[ost->file_index]->ctx;
            int j;

            /* grow by nb_chapters - 1: the "chapters" entry itself is
             * replaced by one entry per chapter */
            if (avf->nb_chapters > INT_MAX - size ||
                !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
                                     sizeof(*pts)))) {
                av_log(NULL, AV_LOG_FATAL,
                       "Could not allocate forced key frames array.\n");
                exit_program(1);
            }
            /* optional "+offset" (or any time string) after "chapters" */
            t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
            t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

            for (j = 0; j < avf->nb_chapters; j++) {
                AVChapter *c = avf->chapters[j];
                av_assert1(index < size);
                pts[index++] = av_rescale_q(c->start, c->time_base,
                                            avctx->time_base) + t;
            }

        } else {

            t = parse_time_or_die("force_key_frames", p, 1);
            av_assert1(index < size);
            pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);

        }

        p = next;
    }

    av_assert0(index == size);
    /* the encoder-side lookup expects the timestamps sorted */
    qsort(pts, size, sizeof(*pts), compare_int64);
    ost->forced_kf_count = size;
    ost->forced_kf_pts   = pts;
}
3323 
/* Choose the encoder time base for ost: an explicitly requested timebase
 * if ost->enc_timebase has a positive numerator; the input stream's
 * timebase if the numerator is negative and an input stream exists;
 * otherwise the supplied default. */
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
{
    AVCodecContext *ctx = ost->enc_ctx;
    InputStream *input  = get_input_stream(ost);

    /* positive numerator: user forced a specific timebase */
    if (ost->enc_timebase.num > 0) {
        ctx->time_base = ost->enc_timebase;
        return;
    }

    /* negative numerator: mirror the input stream timebase when possible */
    if (ost->enc_timebase.num < 0) {
        if (input) {
            ctx->time_base = input->st->time_base;
            return;
        }

        av_log(output_files[ost->file_index]->ctx, AV_LOG_WARNING,
               "Input stream data not available, using default time base\n");
    }

    ctx->time_base = default_time_base;
}
3347 
/**
 * Configure the encoder context of an output stream before the encoder is
 * opened: disposition, frame rate, per-media-type parameters (taken from
 * the filter chain sink and, where available, the decoder context), field
 * order and forced key frames.
 *
 * @param ost   output stream to configure (ost->enc_ctx is filled in)
 * @param frame first filtered frame, may be NULL; when present, its color
 *              and interlacing properties are propagated to the encoder
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream_encode(OutputStream *ost, AVFrame *frame)
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    AVCodecContext *dec_ctx = NULL;
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        // inherit disposition and chroma location from the input stream
        ost->st->disposition          = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        // no input stream: mark the stream as default if it is the only
        // audio/video stream of its type in the output file
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
                ost->st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                ost->st->disposition = AV_DISPOSITION_DEFAULT;
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        // frame rate selection cascade: user option (-r) > filter sink >
        // input -framerate > input r_frame_rate > 25 fps fallback
        if (!ost->frame_rate.num)
            ost->frame_rate = av_buffersink_get_frame_rate(ost->filter->filter);
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num && !ost->max_frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            av_log(NULL, AV_LOG_WARNING,
                   "No information "
                   "about the input framerate is available. Falling "
                   "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
                   "if you want a different framerate.\n",
                   ost->file_index, ost->index);
        }

        // clamp to -fpsmax when set (also used when no rate was found)
        if (ost->max_frame_rate.num &&
            (av_q2d(ost->frame_rate) > av_q2d(ost->max_frame_rate) ||
            !ost->frame_rate.den))
            ost->frame_rate = ost->max_frame_rate;

        // snap to the nearest rate the encoder supports, unless -force_fps
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        // audio format/rate/layout come from the filter chain sink
        enc_ctx->sample_fmt     = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
        enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
        enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
        enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        // default video timebase is the inverse of the frame rate
        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
            enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
           && (video_sync_method == VSYNC_CFR || video_sync_method == VSYNC_VSCFR || (video_sync_method == VSYNC_AUTO && !(oc->oformat->flags & AVFMT_VARIABLE_FPS)))){
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        // geometry and pixel format come from the filter chain sink
        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            av_buffersink_get_sample_aspect_ratio(ost->filter->filter);

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        // propagate color properties from the first filtered frame, if any
        if (frame) {
            enc_ctx->color_range            = frame->color_range;
            enc_ctx->color_primaries        = frame->color_primaries;
            enc_ctx->color_trc              = frame->color_trc;
            enc_ctx->colorspace             = frame->colorspace;
            enc_ctx->chroma_sample_location = frame->chroma_location;
        }

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        // if the frame was resized or converted, the decoder's raw-sample
        // depth no longer applies; fall back to the global cli value
        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            enc_ctx->bits_per_raw_sample = frame_bits_per_raw_sample;
        }

        // Field order: autodetection
        if (frame) {
            if (enc_ctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT | AV_CODEC_FLAG_INTERLACED_ME) &&
                ost->top_field_first >= 0)
                frame->top_field_first = !!ost->top_field_first;

            if (frame->interlaced_frame) {
                if (enc_ctx->codec->id == AV_CODEC_ID_MJPEG)
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
                else
                    enc_ctx->field_order = frame->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
            } else
                enc_ctx->field_order = AV_FIELD_PROGRESSIVE;
        }

        // Field order: override
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            // "expr:" prefix: keyframe times driven by an expression,
            // evaluated per frame in do_video_out()
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                ret = av_expr_parse(&ost->forced_keyframes_pexpr, ost->forced_keyframes+5,
                                    forced_keyframes_const_names, NULL, NULL, NULL, NULL, 0, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR,
                           "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }
                ost->forced_keyframes_expr_const_values[FKF_N] = 0;
                ost->forced_keyframes_expr_const_values[FKF_N_FORCED] = 0;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_N] = NAN;
                ost->forced_keyframes_expr_const_values[FKF_PREV_FORCED_T] = NAN;

                // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
                // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                parse_forced_key_frames(ost->forced_keyframes, ost, ost->enc_ctx);
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        // default subtitle canvas size to the input video dimensions
        if (!enc_ctx->width) {
            enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3530 
/**
 * Finish setting up an output stream: open the encoder (for transcoded
 * streams) or copy input parameters (for streamcopy), apply a
 * user-specified disposition, initialize bitstream filters and, once all
 * streams of the file are ready, write the output file header.
 *
 * @param ost       stream to initialize
 * @param frame     first filtered frame for encoded streams (may be NULL);
 *                  forwarded to init_output_stream_encode()
 * @param error     buffer receiving a human-readable message on failure
 * @param error_len size of the error buffer
 * @return 0 on success, a negative AVERROR code on failure
 */
static int init_output_stream(OutputStream *ost, AVFrame *frame,
                              char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        const AVCodec *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost, frame);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            ost->enc_ctx->subtitle_header = av_mallocz(dec->subtitle_header_size + 1);
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            ost->enc_ctx->subtitle_header_size = dec->subtitle_header_size;
        }
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* audio encoders without their own defaults get a sane bitrate */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* subtitle transcoding only works within the same representation
         * (text -> text or bitmap -> bitmap) */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            AVCodecDescriptor const *input_descriptor =
                avcodec_descriptor_get(dec->codec_id);
            AVCodecDescriptor const *output_descriptor =
                avcodec_descriptor_get(ost->enc_ctx->codec_id);
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                    ost->file_index, ost->index);
            return ret;
        }
        /* fixed-frame-size audio encoders need the sink to emit frames of
         * exactly enc_ctx->frame_size samples */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                            ost->enc_ctx->frame_size);
        assert_avoptions(ost->encoder_opts);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        ret = avcodec_parameters_from_context(ost->st->codecpar, ost->enc_ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL,
                   "Error initializing the output stream codec context.\n");
            exit_program(1);
        }

        /* copy side data the encoder produced at open time to the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* autorotation was applied by the filter chain, so reset
                     * the display matrix to identity; the matrix is int32_t[9],
                     * matching av_display_rotation_set()'s signature (was
                     * incorrectly cast to uint32_t *) */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((int32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3713 
/* Warn, once per stream index, when a packet arrives on an input stream
 * that was not present during initial stream discovery. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *ifile = input_files[input_index];
    AVStream *stream;

    /* already warned about this (or a higher) stream index */
    if (pkt->stream_index < ifile->nb_streams_warn)
        return;

    stream = ifile->ctx->streams[pkt->stream_index];
    av_log(ifile->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           av_get_media_type_string(stream->codecpar->codec_type),
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &stream->time_base));
    ifile->nb_streams_warn = pkt->stream_index + 1;
}
3728 
transcode_init(void)3729 static int transcode_init(void)
3730 {
3731     int ret = 0, i, j, k;
3732     AVFormatContext *oc;
3733     OutputStream *ost;
3734     InputStream *ist;
3735     char error[1024] = {0};
3736 
3737     for (i = 0; i < nb_filtergraphs; i++) {
3738         FilterGraph *fg = filtergraphs[i];
3739         for (j = 0; j < fg->nb_outputs; j++) {
3740             OutputFilter *ofilter = fg->outputs[j];
3741             if (!ofilter->ost || ofilter->ost->source_index >= 0)
3742                 continue;
3743             if (fg->nb_inputs != 1)
3744                 continue;
3745             for (k = nb_input_streams-1; k >= 0 ; k--)
3746                 if (fg->inputs[0]->ist == input_streams[k])
3747                     break;
3748             ofilter->ost->source_index = k;
3749         }
3750     }
3751 
3752     /* init framerate emulation */
3753     for (i = 0; i < nb_input_files; i++) {
3754         InputFile *ifile = input_files[i];
3755         if (ifile->rate_emu)
3756             for (j = 0; j < ifile->nb_streams; j++)
3757                 input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3758     }
3759 
3760     /* init input streams */
3761     for (i = 0; i < nb_input_streams; i++)
3762         if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
3763             for (i = 0; i < nb_output_streams; i++) {
3764                 ost = output_streams[i];
3765                 avcodec_close(ost->enc_ctx);
3766             }
3767             goto dump_format;
3768         }
3769 
3770     /*
3771      * initialize stream copy and subtitle/data streams.
3772      * Encoded AVFrame based streams will get initialized as follows:
3773      * - when the first AVFrame is received in do_video_out
3774      * - just before the first AVFrame is received in either transcode_step
3775      *   or reap_filters due to us requiring the filter chain buffer sink
3776      *   to be configured with the correct audio frame size, which is only
3777      *   known after the encoder is initialized.
3778      */
3779     for (i = 0; i < nb_output_streams; i++) {
3780         if (!output_streams[i]->stream_copy &&
3781             (output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
3782              output_streams[i]->enc_ctx->codec_type == AVMEDIA_TYPE_AUDIO))
3783             continue;
3784 
3785         ret = init_output_stream_wrapper(output_streams[i], NULL, 0);
3786         if (ret < 0)
3787             goto dump_format;
3788     }
3789 
3790     /* discard unused programs */
3791     for (i = 0; i < nb_input_files; i++) {
3792         InputFile *ifile = input_files[i];
3793         for (j = 0; j < ifile->ctx->nb_programs; j++) {
3794             AVProgram *p = ifile->ctx->programs[j];
3795             int discard  = AVDISCARD_ALL;
3796 
3797             for (k = 0; k < p->nb_stream_indexes; k++)
3798                 if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3799                     discard = AVDISCARD_DEFAULT;
3800                     break;
3801                 }
3802             p->discard = discard;
3803         }
3804     }
3805 
3806     /* write headers for files with no streams */
3807     for (i = 0; i < nb_output_files; i++) {
3808         oc = output_files[i]->ctx;
3809         if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3810             ret = check_init_output_file(output_files[i], i);
3811             if (ret < 0)
3812                 goto dump_format;
3813         }
3814     }
3815 
3816  dump_format:
3817     /* dump the stream mapping */
3818     av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3819     for (i = 0; i < nb_input_streams; i++) {
3820         ist = input_streams[i];
3821 
3822         for (j = 0; j < ist->nb_filters; j++) {
3823             if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3824                 av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
3825                        ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3826                        ist->filters[j]->name);
3827                 if (nb_filtergraphs > 1)
3828                     av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3829                 av_log(NULL, AV_LOG_INFO, "\n");
3830             }
3831         }
3832     }
3833 
3834     for (i = 0; i < nb_output_streams; i++) {
3835         ost = output_streams[i];
3836 
3837         if (ost->attachment_filename) {
3838             /* an attached file */
3839             av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
3840                    ost->attachment_filename, ost->file_index, ost->index);
3841             continue;
3842         }
3843 
3844         if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3845             /* output from a complex graph */
3846             av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
3847             if (nb_filtergraphs > 1)
3848                 av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3849 
3850             av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3851                    ost->index, ost->enc ? ost->enc->name : "?");
3852             continue;
3853         }
3854 
3855         av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
3856                input_streams[ost->source_index]->file_index,
3857                input_streams[ost->source_index]->st->index,
3858                ost->file_index,
3859                ost->index);
3860         if (ost->sync_ist != input_streams[ost->source_index])
3861             av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3862                    ost->sync_ist->file_index,
3863                    ost->sync_ist->st->index);
3864         if (ost->stream_copy)
3865             av_log(NULL, AV_LOG_INFO, " (copy)");
3866         else {
3867             const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
3868             const AVCodec *out_codec   = ost->enc;
3869             const char *decoder_name   = "?";
3870             const char *in_codec_name  = "?";
3871             const char *encoder_name   = "?";
3872             const char *out_codec_name = "?";
3873             const AVCodecDescriptor *desc;
3874 
3875             if (in_codec) {
3876                 decoder_name  = in_codec->name;
3877                 desc = avcodec_descriptor_get(in_codec->id);
3878                 if (desc)
3879                     in_codec_name = desc->name;
3880                 if (!strcmp(decoder_name, in_codec_name))
3881                     decoder_name = "native";
3882             }
3883 
3884             if (out_codec) {
3885                 encoder_name   = out_codec->name;
3886                 desc = avcodec_descriptor_get(out_codec->id);
3887                 if (desc)
3888                     out_codec_name = desc->name;
3889                 if (!strcmp(encoder_name, out_codec_name))
3890                     encoder_name = "native";
3891             }
3892 
3893             av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3894                    in_codec_name, decoder_name,
3895                    out_codec_name, encoder_name);
3896         }
3897         av_log(NULL, AV_LOG_INFO, "\n");
3898     }
3899 
3900     if (ret) {
3901         av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3902         return ret;
3903     }
3904 
3905     atomic_store(&transcode_init_done, 1);
3906 
3907     return 0;
3908 }
3909 
3910 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
need_output(void)3911 static int need_output(void)
3912 {
3913     int i;
3914 
3915     for (i = 0; i < nb_output_streams; i++) {
3916         OutputStream *ost    = output_streams[i];
3917         OutputFile *of       = output_files[ost->file_index];
3918         AVFormatContext *os  = output_files[ost->file_index]->ctx;
3919 
3920         if (ost->finished ||
3921             (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3922             continue;
3923         if (ost->frame_number >= ost->max_frames) {
3924             int j;
3925             for (j = 0; j < of->ctx->nb_streams; j++)
3926                 close_output_stream(output_streams[of->ost_index + j]);
3927             continue;
3928         }
3929 
3930         return 1;
3931     }
3932 
3933     return 0;
3934 }
3935 
3936 /**
3937  * Select the output stream to process.
3938  *
3939  * @return  selected output stream, or NULL if none available
3940  */
choose_output(void)3941 static OutputStream *choose_output(void)
3942 {
3943     int i;
3944     int64_t opts_min = INT64_MAX;
3945     OutputStream *ost_min = NULL;
3946 
3947     for (i = 0; i < nb_output_streams; i++) {
3948         OutputStream *ost = output_streams[i];
3949         int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3950                        av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3951                                     AV_TIME_BASE_Q);
3952         if (ost->st->cur_dts == AV_NOPTS_VALUE)
3953             av_log(NULL, AV_LOG_DEBUG,
3954                 "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
3955                 ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);
3956 
3957         if (!ost->initialized && !ost->inputs_done)
3958             return ost->unavailable ? NULL : ost;
3959 
3960         if (!ost->finished && opts < opts_min) {
3961             opts_min = opts;
3962             ost_min  = ost->unavailable ? NULL : ost;
3963         }
3964     }
3965     return ost_min;
3966 }
3967 
/* Enable (on != 0) or disable terminal echo on stdin; no-op where
 * termios is unavailable, and silently best-effort otherwise. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;                 /* stdin is not a tty, or query failed */
    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &tty);
#endif
}
3979 
/* Poll the terminal for single-key interactive commands.
 * Returns AVERROR_EXIT when the user asked to quit or a signal was
 * received, 0 otherwise. Keyboard reads are rate-limited to one per
 * 100 ms and disabled when running as a daemon. */
static int check_keyboard_interaction(int64_t cur_time)
{
    int i, ret, key;
    static int64_t last_time;   /* time of the previous keyboard poll */
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    /* '+'/'-' adjust log verbosity at runtime; 's' toggles the QP histogram. */
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    /* 'h' cycles: no dump -> packet dump -> packet+hex dump -> off. */
    if (key == 'h'){
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        av_log_set_level(AV_LOG_DEBUG);
    }
    /* 'c'/'C': read one command line from the tty and send it to the
     * filtergraphs ('c' = first matching filter only, 'C' = all). */
    if (key == 'c' || key == 'C'){
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command; buf doubles as the reply buffer */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* time >= 0: queue the command for later execution */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            av_log(NULL, AV_LOG_ERROR,
                   "Parse error, at least 3 arguments were expected, "
                   "only %d given in string '%s'\n", n, buf);
        }
    }
    /* 'D' cycles debug modes (doubling the flags); 'd' reads a numeric
     * debug value from the tty. Either way the value is applied to all
     * decoder and encoder contexts. */
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            debug = input_streams[0]->dec_ctx->debug << 1;
            if(!debug) debug = 1;
            while (debug & FF_DEBUG_DCT_COEFF) //unsupported, would just crash
                debug += debug;
        }else{
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->dec_ctx->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
4092 
4093 #if HAVE_THREADS
/* Body of the per-input-file demuxer thread: read packets from the
 * demuxer and push them into f->in_thread_queue until a read error or
 * EOF. The terminating error code is forwarded to the consumer side of
 * the queue so the main thread sees it on its next recv. */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    AVPacket *pkt = f->pkt, *queue_pkt;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        ret = av_read_frame(f->ctx, pkt);

        if (ret == AVERROR(EAGAIN)) {
            /* nothing available yet (live input); back off briefly */
            av_usleep(10000);
            continue;
        }
        if (ret < 0) {
            /* EOF or read error: make the receiver see it */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        /* the queue carries packet pointers, so each packet needs its own
         * allocation; ownership of the data moves from pkt to queue_pkt */
        queue_pkt = av_packet_alloc();
        if (!queue_pkt) {
            av_packet_unref(pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, AVERROR(ENOMEM));
            break;
        }
        av_packet_move_ref(queue_pkt, pkt);
        ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full in non-blocking mode: retry blocking and warn
             * that thread_queue_size may be too small */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &queue_pkt, flags);
            av_log(f->ctx, AV_LOG_WARNING,
                   "Thread message queue blocking; consider raising the "
                   "thread_queue_size option (current value: %d)\n",
                   f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_free(&queue_pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4141 
free_input_thread(int i)4142 static void free_input_thread(int i)
4143 {
4144     InputFile *f = input_files[i];
4145     AVPacket *pkt;
4146 
4147     if (!f || !f->in_thread_queue)
4148         return;
4149     av_thread_message_queue_set_err_send(f->in_thread_queue, AVERROR_EOF);
4150     while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4151         av_packet_free(&pkt);
4152 
4153     pthread_join(f->thread, NULL);
4154     f->joined = 1;
4155     av_thread_message_queue_free(&f->in_thread_queue);
4156 }
4157 
free_input_threads(void)4158 static void free_input_threads(void)
4159 {
4160     int i;
4161 
4162     for (i = 0; i < nb_input_files; i++)
4163         free_input_thread(i);
4164 }
4165 
/* Create the demuxer thread for input file i, when threaded reading is
 * enabled (several input files, or an explicit -thread_queue_size).
 * Returns 0 on success or when no thread is needed, <0 on error. */
static int init_input_thread(int i)
{
    int ret;
    InputFile *f = input_files[i];

    /* Default queue size: 8 when reading multiple files, else disabled. */
    if (f->thread_queue_size < 0)
        f->thread_queue_size = (nb_input_files > 1 ? 8 : 0);
    if (!f->thread_queue_size)
        return 0;

    /* Non-seekable (live-ish) inputs are read non-blocking so one stalled
     * input cannot block the others; the "lavfi" device is exempted. */
    if (f->ctx->pb ? !f->ctx->pb->seekable :
        strcmp(f->ctx->iformat->name, "lavfi"))
        f->non_blocking = 1;
    ret = av_thread_message_queue_alloc(&f->in_thread_queue,
                                        f->thread_queue_size, sizeof(f->pkt));
    if (ret < 0)
        return ret;

    /* pthread_create returns a positive errno-style code on failure. */
    if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
        av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
        av_thread_message_queue_free(&f->in_thread_queue);
        return AVERROR(ret);
    }

    return 0;
}
4192 
init_input_threads(void)4193 static int init_input_threads(void)
4194 {
4195     int i, ret;
4196 
4197     for (i = 0; i < nb_input_files; i++) {
4198         ret = init_input_thread(i);
4199         if (ret < 0)
4200             return ret;
4201     }
4202     return 0;
4203 }
4204 
/* Pop the next demuxed packet from the input thread's message queue,
 * non-blocking when the file was flagged as such. */
static int get_input_packet_mt(InputFile *f, AVPacket **pkt)
{
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;

    return av_thread_message_queue_recv(f->in_thread_queue, pkt, flags);
}
4211 #endif
4212 
/* Fetch the next packet of input file f, honoring -re rate emulation.
 * Returns 0 on success, AVERROR(EAGAIN) when throttled or no data yet,
 * or another error/EOF code from the demuxer. */
static int get_input_packet(InputFile *f, AVPacket **pkt)
{
    /* With -re, throttle so no stream runs ahead of wallclock time. */
    if (f->rate_emu) {
        int i;

        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            int64_t elapsed  = av_gettime_relative() - ist->start;
            int64_t due      = av_rescale(ist->dts, 1000000, AV_TIME_BASE);

            if (due > elapsed)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (f->thread_queue_size)
        return get_input_packet_mt(f, pkt);
#endif
    *pkt = f->pkt;
    return av_read_frame(f->ctx, *pkt);
}
4233 
got_eagain(void)4234 static int got_eagain(void)
4235 {
4236     int i;
4237     for (i = 0; i < nb_output_streams; i++)
4238         if (output_streams[i]->unavailable)
4239             return 1;
4240     return 0;
4241 }
4242 
reset_eagain(void)4243 static void reset_eagain(void)
4244 {
4245     int i;
4246     for (i = 0; i < nb_input_files; i++)
4247         input_files[i]->eagain = 0;
4248     for (i = 0; i < nb_output_streams; i++)
4249         output_streams[i]->unavailable = 0;
4250 }
4251 
4252 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
duration_max(int64_t tmp,int64_t * duration,AVRational tmp_time_base,AVRational time_base)4253 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4254                                AVRational time_base)
4255 {
4256     int ret;
4257 
4258     if (!*duration) {
4259         *duration = tmp;
4260         return tmp_time_base;
4261     }
4262 
4263     ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4264     if (ret < 0) {
4265         *duration = tmp;
4266         return tmp_time_base;
4267     }
4268 
4269     return time_base;
4270 }
4271 
/* Rewind an input file for the -stream_loop option: seek back to the
 * start and fold the file's total duration into ifile->duration so that
 * timestamps of the next iteration continue from the previous one.
 * Returns the seek result (<0 on failure). */
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* last-frame length = decoded sample count in stream time base */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* no audio: estimate last-frame length from the frame rate,
             * falling back to 1 stream-time-base unit when unknown */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    /* a positive loop count is consumed; negative means loop forever */
    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4330 
/*
 * Return
 * - 0 -- one packet was read and processed
 * - AVERROR(EAGAIN) -- no packets were available for selected file,
 *   this function should be called again
 * - AVERROR_EOF -- this function should not be called again
 */
static int process_input(int file_index)
{
    InputFile *ifile = input_files[file_index];
    AVFormatContext *is;
    InputStream *ist;
    AVPacket *pkt;
    int ret, thread_ret, i, j;
    int64_t duration;
    int64_t pkt_dts;
    int disable_discontinuity_correction = copy_ts;

    is  = ifile->ctx;
    ret = get_input_packet(ifile, &pkt);

    if (ret == AVERROR(EAGAIN)) {
        ifile->eagain = 1;
        return ret;
    }
    /* -stream_loop: on EOF, flush the decoders, seek back to the start
     * (restarting the demuxer thread around the seek) and try again. */
    if (ret < 0 && ifile->loop) {
        AVCodecContext *avctx;
        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            avctx = ist->dec_ctx;
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 1);
                if (ret>0)
                    return 0;
                avcodec_flush_buffers(avctx);
            }
        }
#if HAVE_THREADS
        free_input_thread(file_index);
#endif
        ret = seek_to_start(ifile, is);
#if HAVE_THREADS
        thread_ret = init_input_thread(file_index);
        if (thread_ret < 0)
            return thread_ret;
#endif
        if (ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
        else
            ret = get_input_packet(ifile, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            ifile->eagain = 1;
            return ret;
        }
    }
    /* True EOF (or read error): flush decoders, finish the outputs that
     * do not go through lavfi, and report EAGAIN so other files proceed. */
    if (ret < 0) {
        if (ret != AVERROR_EOF) {
            print_error(is->url, ret);
            if (exit_on_error)
                exit_program(1);
        }

        for (i = 0; i < ifile->nb_streams; i++) {
            ist = input_streams[ifile->ist_index + i];
            if (ist->decoding_needed) {
                ret = process_input_packet(ist, NULL, 0);
                if (ret>0)
                    return 0;
            }

            /* mark all outputs that don't go through lavfi as finished */
            for (j = 0; j < nb_output_streams; j++) {
                OutputStream *ost = output_streams[j];

                if (ost->source_index == ifile->ist_index + i &&
                    (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
                    finish_output_stream(ost);
            }
        }

        ifile->eof_reached = 1;
        return AVERROR(EAGAIN);
    }

    reset_eagain();

    if (do_pkt_dump) {
        av_pkt_dump_log2(NULL, AV_LOG_INFO, pkt, do_hex_dump,
                         is->streams[pkt->stream_index]);
    }
    /* the following test is needed in case new streams appear
       dynamically in stream : we ignore them */
    if (pkt->stream_index >= ifile->nb_streams) {
        report_new_stream(file_index, pkt);
        goto discard_packet;
    }

    ist = input_streams[ifile->ist_index + pkt->stream_index];

    /* per-stream statistics */
    ist->data_size += pkt->size;
    ist->nb_packets++;

    if (ist->discard)
        goto discard_packet;

    if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
        av_log(NULL, exit_on_error ? AV_LOG_FATAL : AV_LOG_WARNING,
               "%s: corrupt input packet in stream %d\n", is->url, pkt->stream_index);
        if (exit_on_error)
            exit_program(1);
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
               "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(ist->next_dts), av_ts2timestr(ist->next_dts, &AV_TIME_BASE_Q),
               av_ts2str(ist->next_pts), av_ts2timestr(ist->next_pts, &AV_TIME_BASE_Q),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    /* timestamp wrap correction for streams with < 64 pts_wrap_bits */
    if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
        int64_t stime, stime2;
        // Correcting starttime based on the enabled streams
        // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
        //       so we instead do it here as part of discontinuity handling
        if (   ist->next_dts == AV_NOPTS_VALUE
            && ifile->ts_offset == -is->start_time
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t new_start_time = INT64_MAX;
            for (i=0; i<is->nb_streams; i++) {
                AVStream *st = is->streams[i];
                if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
                    continue;
                new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
            }
            if (new_start_time > is->start_time) {
                av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
                ifile->ts_offset = -new_start_time;
            }
        }

        stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
        stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
        ist->wrap_correction_done = 1;

        /* unwrap timestamps that have already wrapped past the start time */
        if(stime2 > stime && pkt->dts != AV_NOPTS_VALUE && pkt->dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->dts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
        if(stime2 > stime && pkt->pts != AV_NOPTS_VALUE && pkt->pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
            pkt->pts -= 1ULL<<ist->st->pts_wrap_bits;
            ist->wrap_correction_done = 0;
        }
    }

    /* add the stream-global side data to the first packet */
    if (ist->nb_packets == 1) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            AVPacketSideData *src_sd = &ist->st->side_data[i];
            uint8_t *dst_data;

            if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                continue;

            /* do not overwrite side data the packet already carries */
            if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                continue;

            dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
            if (!dst_data)
                exit_program(1);

            memcpy(dst_data, src_sd->data, src_sd->size);
        }
    }

    /* apply the input file's timestamp offset and -itsscale factor */
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts *= ist->ts_scale;
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts *= ist->ts_scale;

    /* inter-stream discontinuity detection against the file's last seen ts */
    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
        pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
        && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
        int64_t delta   = pkt_dts - ifile->last_ts;
        if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
            delta >  1LL*dts_delta_threshold*AV_TIME_BASE){
            ifile->ts_offset -= delta;
            av_log(NULL, AV_LOG_DEBUG,
                   "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                   delta, ifile->ts_offset);
            pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            if (pkt->pts != AV_NOPTS_VALUE)
                pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
        }
    }

    /* -stream_loop: shift timestamps by the accumulated file duration */
    duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
    if (pkt->pts != AV_NOPTS_VALUE) {
        pkt->pts += duration;
        ist->max_pts = FFMAX(pkt->pts, ist->max_pts);
        ist->min_pts = FFMIN(pkt->pts, ist->min_pts);
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts += duration;

    pkt_dts = av_rescale_q_rnd(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);

    /* with -copyts, still correct a discontinuity when the dts looks like
     * a genuine timestamp wrap (the unwrapped dts is much closer to the
     * expected next_dts than the raw one) */
    if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
        int64_t wrap_dts = av_rescale_q_rnd(pkt->dts + (1LL<<ist->st->pts_wrap_bits),
                                            ist->st->time_base, AV_TIME_BASE_Q,
                                            AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
            disable_discontinuity_correction = 0;
    }

    /* per-stream discontinuity handling: for TS_DISCONT formats adjust the
     * offset; otherwise drop wildly implausible timestamps */
    if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
         ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) &&
         pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
        !disable_discontinuity_correction) {
        int64_t delta   = pkt_dts - ist->next_dts;
        if (is->iformat->flags & AVFMT_TS_DISCONT) {
            if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
                delta >  1LL*dts_delta_threshold*AV_TIME_BASE ||
                pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
                ifile->ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity for stream #%d:%d "
                       "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
                       ist->file_index, ist->st->index, ist->st->id,
                       av_get_media_type_string(ist->dec_ctx->codec_type),
                       delta, ifile->ts_offset);
                pkt->dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt->pts != AV_NOPTS_VALUE)
                    pkt->pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        } else {
            if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                 delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt->dts, ist->next_dts, pkt->stream_index);
                pkt->dts = AV_NOPTS_VALUE;
            }
            if (pkt->pts != AV_NOPTS_VALUE){
                int64_t pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
                delta   = pkt_pts - ist->next_dts;
                if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
                     delta >  1LL*dts_error_threshold*AV_TIME_BASE) {
                    av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt->pts, ist->next_dts, pkt->stream_index);
                    pkt->pts = AV_NOPTS_VALUE;
                }
            }
        }
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ifile->last_ts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
               ifile->ist_index + pkt->stream_index, av_get_media_type_string(ist->dec_ctx->codec_type),
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ist->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ist->st->time_base),
               av_ts2str(input_files[ist->file_index]->ts_offset),
               av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
    }

    sub2video_heartbeat(ist, pkt->pts);

    process_input_packet(ist, pkt, 0);

discard_packet:
    /* threaded reading allocates a fresh packet per message; single-threaded
     * reading reuses f->pkt, which only needs to be unreferenced */
#if HAVE_THREADS
    if (ifile->thread_queue_size)
        av_packet_free(&pkt);
    else
#endif
    av_packet_unref(pkt);

    return 0;
}
4623 
4624 /**
4625  * Perform a step of transcoding for the specified filter graph.
4626  *
4627  * @param[in]  graph     filter graph to consider
4628  * @param[out] best_ist  input stream where a frame would allow to continue
4629  * @return  0 for success, <0 for error
4630  */
transcode_from_filter(FilterGraph * graph,InputStream ** best_ist)4631 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4632 {
4633     int i, ret;
4634     int nb_requests, nb_requests_max = 0;
4635     InputFilter *ifilter;
4636     InputStream *ist;
4637 
4638     *best_ist = NULL;
4639     ret = avfilter_graph_request_oldest(graph->graph);
4640     if (ret >= 0)
4641         return reap_filters(0);
4642 
4643     if (ret == AVERROR_EOF) {
4644         ret = reap_filters(1);
4645         for (i = 0; i < graph->nb_outputs; i++)
4646             close_output_stream(graph->outputs[i]->ost);
4647         return ret;
4648     }
4649     if (ret != AVERROR(EAGAIN))
4650         return ret;
4651 
4652     for (i = 0; i < graph->nb_inputs; i++) {
4653         ifilter = graph->inputs[i];
4654         ist = ifilter->ist;
4655         if (input_files[ist->file_index]->eagain ||
4656             input_files[ist->file_index]->eof_reached)
4657             continue;
4658         nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4659         if (nb_requests > nb_requests_max) {
4660             nb_requests_max = nb_requests;
4661             *best_ist = ist;
4662         }
4663     }
4664 
4665     if (!*best_ist)
4666         for (i = 0; i < graph->nb_outputs; i++)
4667             graph->outputs[i]->ost->unavailable = 1;
4668 
4669     return 0;
4670 }
4671 
/**
 * Run a single step of transcoding.
 *
 * Selects the output stream most in need of data, ensures its filter
 * graph (if any) is configured, then reads and processes one packet
 * from the chosen input file.
 *
 * @return  0 for success, <0 for error
 */
static int transcode_step(void)
{
    OutputStream *ost;
    InputStream  *ist = NULL;
    int ret;

    /* Pick the output stream with the lowest timestamp (most starved). */
    ost = choose_output();
    if (!ost) {
        if (got_eagain()) {
            /* Every input is temporarily stalled: clear the flags and
             * sleep briefly instead of busy-waiting. */
            reset_eagain();
            av_usleep(10000);
            return 0;
        }
        av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
        return AVERROR_EOF;
    }

    /* The output has a filtergraph that is not configured yet; configure
     * it once all of its inputs have known formats. */
    if (ost->filter && !ost->filter->graph->graph) {
        if (ifilter_has_all_input_formats(ost->filter->graph)) {
            ret = configure_filtergraph(ost->filter->graph);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
                return ret;
            }
        }
    }

    if (ost->filter && ost->filter->graph->graph) {
        /*
         * Similar case to the early audio initialization in reap_filters.
         * Audio is special in ffmpeg.c currently as we depend on lavfi's
         * audio frame buffering/creation to get the output audio frame size
         * in samples correct. The audio frame size for the filter chain is
         * configured during the output stream initialization.
         *
         * Apparently avfilter_graph_request_oldest (called in
         * transcode_from_filter just down the line) peeks. Peeking already
         * puts one frame "ready to be given out", which means that any
         * update in filter buffer sink configuration afterwards will not
         * help us. And yes, even if it would be utilized,
         * av_buffersink_get_samples is affected, as it internally utilizes
         * the same early exit for peeked frames.
         *
         * In other words, if avfilter_graph_request_oldest would not make
         * further filter chain configuration or usage of
         * av_buffersink_get_samples useless (by just causing the return
         * of the peeked AVFrame as-is), we could get rid of this additional
         * early encoder initialization.
         */
        if (av_buffersink_get_type(ost->filter->filter) == AVMEDIA_TYPE_AUDIO)
            init_output_stream_wrapper(ost, NULL, 1);

        /* Ask the graph which input stream should be fed next. */
        if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
            return ret;
        if (!ist)
            return 0;
    } else if (ost->filter) {
        /* Graph exists but is not configured: feed any input that has not
         * produced output yet and whose file is not exhausted. */
        int i;
        for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
            InputFilter *ifilter = ost->filter->graph->inputs[i];
            if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
                ist = ifilter->ist;
                break;
            }
        }
        if (!ist) {
            /* All inputs of this unconfigured graph are done. */
            ost->inputs_done = 1;
            return 0;
        }
    } else {
        /* No filtering (e.g. streamcopy): read from the mapped source. */
        av_assert0(ost->source_index >= 0);
        ist = input_streams[ost->source_index];
    }

    ret = process_input(ist->file_index);
    if (ret == AVERROR(EAGAIN)) {
        if (input_files[ist->file_index]->eagain)
            ost->unavailable = 1;
        return 0;
    }

    if (ret < 0)
        return ret == AVERROR_EOF ? 0 : ret;

    return reap_filters(0);
}
4763 
/*
 * The following code is the main loop of the file converter:
 * initialize, run transcode_step() until done or interrupted, then
 * flush decoders/encoders, write trailers and release resources.
 * Returns 0 on success, <0 on error.
 */
static int transcode(void)
{
    int ret, i;
    AVFormatContext *os;
    OutputStream *ost;
    InputStream *ist;
    int64_t timer_start;
    int64_t total_packets_written = 0;

    ret = transcode_init();
    if (ret < 0)
        goto fail;

    if (stdin_interaction) {
        av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
    }

    timer_start = av_gettime_relative();

#if HAVE_THREADS
    if ((ret = init_input_threads()) < 0)
        goto fail;
#endif

    /* Main loop: one transcode step per iteration until a signal is
     * received, the user quits, or no output stream needs data. */
    while (!received_sigterm) {
        int64_t cur_time= av_gettime_relative();

        /* if 'q' pressed, exits */
        if (stdin_interaction)
            if (check_keyboard_interaction(cur_time) < 0)
                break;

        /* check if there's any stream where output is still needed */
        if (!need_output()) {
            av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
            break;
        }

        ret = transcode_step();
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            break;
        }

        /* dump report by using the output first video and audio streams */
        print_report(0, timer_start, cur_time);
    }
#if HAVE_THREADS
    free_input_threads();
#endif

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (!input_files[ist->file_index]->eof_reached) {
            /* NULL packet = flush request for this stream's decoder. */
            process_input_packet(ist, NULL, 0);
        }
    }
    flush_encoders();

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i]->ctx;
        if (!output_files[i]->header_written) {
            av_log(NULL, AV_LOG_ERROR,
                   "Nothing was written into output file %d (%s), because "
                   "at least one of its streams received no packets.\n",
                   i, os->url);
            continue;
        }
        if ((ret = av_write_trailer(os)) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
            if (exit_on_error)
                exit_program(1);
        }
    }

    /* dump report by using the first video and audio streams */
    print_report(1, timer_start, av_gettime_relative());

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->enc_ctx->stats_in);
        }
        total_packets_written += ost->packets_written;
        if (!ost->packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM)) {
            av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
            exit_program(1);
        }
    }

    if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
        av_log(NULL, AV_LOG_FATAL, "Empty output\n");
        exit_program(1);
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->dec_ctx);
            if (ist->hwaccel_uninit)
                ist->hwaccel_uninit(ist->dec_ctx);
        }
    }

    hw_device_free_all();

    /* finished ! */
    ret = 0;

 fail:
#if HAVE_THREADS
    /* Safe on the success path too: presumably idempotent after the call
     * above — NOTE(review): confirm free_input_threads tolerates re-entry. */
    free_input_threads();
#endif

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = output_streams[i];
            if (ost) {
                if (ost->logfile) {
                    if (fclose(ost->logfile))
                        av_log(NULL, AV_LOG_ERROR,
                               "Error closing logfile, loss of information possible: %s\n",
                               av_err2str(AVERROR(errno)));
                    ost->logfile = NULL;
                }
                av_freep(&ost->forced_kf_pts);
                av_freep(&ost->apad);
                av_freep(&ost->disposition);
                av_dict_free(&ost->encoder_opts);
                av_dict_free(&ost->sws_dict);
                av_dict_free(&ost->swr_opts);
                av_dict_free(&ost->resample_opts);
            }
        }
    }
    return ret;
}
4910 
get_benchmark_time_stamps(void)4911 static BenchmarkTimeStamps get_benchmark_time_stamps(void)
4912 {
4913     BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4914 #if HAVE_GETRUSAGE
4915     struct rusage rusage;
4916 
4917     getrusage(RUSAGE_SELF, &rusage);
4918     time_stamps.user_usec =
4919         (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4920     time_stamps.sys_usec =
4921         (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4922 #elif HAVE_GETPROCESSTIMES
4923     HANDLE proc;
4924     FILETIME c, e, k, u;
4925     proc = GetCurrentProcess();
4926     GetProcessTimes(proc, &c, &e, &k, &u);
4927     time_stamps.user_usec =
4928         ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4929     time_stamps.sys_usec =
4930         ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4931 #else
4932     time_stamps.user_usec = time_stamps.sys_usec = 0;
4933 #endif
4934     return time_stamps;
4935 }
4936 
getmaxrss(void)4937 static int64_t getmaxrss(void)
4938 {
4939 #if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
4940     struct rusage rusage;
4941     getrusage(RUSAGE_SELF, &rusage);
4942     return (int64_t)rusage.ru_maxrss * 1024;
4943 #elif HAVE_GETPROCESSMEMORYINFO
4944     HANDLE proc;
4945     PROCESS_MEMORY_COUNTERS memcounters;
4946     proc = GetCurrentProcess();
4947     memcounters.cb = sizeof(memcounters);
4948     GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
4949     return memcounters.PeakPagefileUsage;
4950 #else
4951     return 0;
4952 #endif
4953 }
4954 
/* Log callback that discards every message; installed instead of the
 * default logger when the program runs in daemon mode ("-d"). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4958 
/* Program entry point: parse options, open inputs/outputs, run the
 * transcode loop, then report benchmark and decode-error statistics.
 * Exits via exit_program() on any fatal condition. */
int main(int argc, char **argv)
{
    int i, ret;
    BenchmarkTimeStamps ti;    /* benchmark baseline taken before transcode() */

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    /* "-d" as the very first argument: run as daemon and silence all
     * logging; handled before regular option parsing. */
    if(argc>1 && !strcmp(argv[1], "-d")){
        run_as_daemon=1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

    /* SDP output is only wanted when every output muxer is RTP. */
    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = get_benchmark_time_stamps();
    if (transcode() < 0)
        exit_program(1);
    if (do_benchmark) {
        int64_t utime, stime, rtime;
        current_time = get_benchmark_time_stamps();
        utime = current_time.user_usec - ti.user_usec;
        stime = current_time.sys_usec  - ti.sys_usec;
        rtime = current_time.real_usec - ti.real_usec;
        av_log(NULL, AV_LOG_INFO,
               "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
               utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    /* Exit with code 69 when the fraction of decode errors exceeds
     * -max_error_rate. */
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    exit_program(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
5030