1 /*
2  * muxing functions for use within FFmpeg
3  * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "avformat.h"
23 #include "internal.h"
24 #include "mux.h"
25 #include "version.h"
26 #include "libavcodec/bsf.h"
27 #include "libavcodec/internal.h"
28 #include "libavcodec/packet_internal.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/dict.h"
31 #include "libavutil/timestamp.h"
32 #include "libavutil/avassert.h"
33 #include "libavutil/internal.h"
34 #include "libavutil/mathematics.h"
35 
36 /**
37  * @file
38  * muxing functions for use within libavformat
39  */
40 
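/*
 * Illustrative usage sketch (not part of the original source): a minimal
 * outline of how a caller typically drives the muxing API implemented in
 * this file.  Error handling, encoder setup and packet production are
 * omitted; "out.mp4" and the hypothetical have_more_packets() helper are
 * placeholders.
 *
 * @code
 * AVFormatContext *oc = NULL;
 * AVPacket *pkt = NULL;                  // encoded packets from an encoder
 * avformat_alloc_output_context2(&oc, NULL, NULL, "out.mp4");
 * AVStream *st = avformat_new_stream(oc, NULL);
 * // ... fill st->codecpar and st->time_base from the encoder ...
 * if (!(oc->oformat->flags & AVFMT_NOFILE))
 *     avio_open(&oc->pb, "out.mp4", AVIO_FLAG_WRITE);
 * avformat_write_header(oc, NULL);       // runs init_muxer()/init_pts() below
 * while (have_more_packets())            // placeholder loop condition
 *     av_interleaved_write_frame(oc, pkt);
 * av_write_trailer(oc);
 * if (!(oc->oformat->flags & AVFMT_NOFILE))
 *     avio_closep(&oc->pb);
 * avformat_free_context(oc);
 * @endcode
 */
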
41 /* fraction handling */
42 
43 /**
44  * f = val + (num / den) + 0.5.
45  *
46  * 'num' is normalized so that 0 <= num < den.
47  *
48  * @param f fractional number
49  * @param val integer value
50  * @param num must be >= 0
51  * @param den must be >= 1
52  */
53 static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
54 {
55     num += (den >> 1);
56     if (num >= den) {
57         val += num / den;
58         num  = num % den;
59     }
60     f->val = val;
61     f->num = num;
62     f->den = den;
63 }
64 
65 /**
66  * Fractional addition to f: f = f + (incr / f->den).
67  *
68  * @param f fractional number
69  * @param incr increment, can be positive or negative
70  */
71 static void frac_add(FFFrac *f, int64_t incr)
72 {
73     int64_t num, den;
74 
75     num = f->num + incr;
76     den = f->den;
77     if (num < 0) {
78         f->val += num / den;
79         num     = num % den;
80         if (num < 0) {
81             num += den;
82             f->val--;
83         }
84     } else if (num >= den) {
85         f->val += num / den;
86         num     = num % den;
87     }
88     f->num = num;
89 }
90 
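/*
 * Worked example (illustrative, not from the original source): init_pts()
 * below seeds the per-stream fraction with
 * frac_init(pts, 0, 0, time_base.num * sample_rate) for audio streams, and
 * compute_muxer_pkt_fields() advances it with
 * frac_add(pts, time_base.den * frame_size) per packet.
 *
 * @code
 * FFFrac pts;
 * frac_init(&pts, 0, 0, (int64_t)1 * 48000);  // time base 1/48000, 48 kHz audio
 * frac_add(&pts, (int64_t)48000 * 1024);      // one 1024-sample frame
 * // pts.val is now 1024, the frame duration in time-base units; the
 * // num/den remainder keeps the running sum exact when the division
 * // is not integral.
 * @endcode
 */
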
91 int avformat_alloc_output_context2(AVFormatContext **avctx, const AVOutputFormat *oformat,
92                                    const char *format, const char *filename)
93 {
94     AVFormatContext *s = avformat_alloc_context();
95     int ret = 0;
96 
97     *avctx = NULL;
98     if (!s)
99         goto nomem;
100 
101     if (!oformat) {
102         if (format) {
103             oformat = av_guess_format(format, NULL, NULL);
104             if (!oformat) {
105                 av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
106                 ret = AVERROR(EINVAL);
107                 goto error;
108             }
109         } else {
110             oformat = av_guess_format(NULL, filename, NULL);
111             if (!oformat) {
112                 ret = AVERROR(EINVAL);
113                 av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
114                        filename);
115                 goto error;
116             }
117         }
118     }
119 
120     s->oformat = oformat;
121     if (s->oformat->priv_data_size > 0) {
122         s->priv_data = av_mallocz(s->oformat->priv_data_size);
123         if (!s->priv_data)
124             goto nomem;
125         if (s->oformat->priv_class) {
126             *(const AVClass**)s->priv_data= s->oformat->priv_class;
127             av_opt_set_defaults(s->priv_data);
128         }
129     } else
130         s->priv_data = NULL;
131 
132     if (filename) {
133         if (!(s->url = av_strdup(filename)))
134             goto nomem;
135 
136     }
137     *avctx = s;
138     return 0;
139 nomem:
140     av_log(s, AV_LOG_ERROR, "Out of memory\n");
141     ret = AVERROR(ENOMEM);
142 error:
143     avformat_free_context(s);
144     return ret;
145 }
146 
147 static int validate_codec_tag(AVFormatContext *s, AVStream *st)
148 {
149     const AVCodecTag *avctag;
150     enum AVCodecID id = AV_CODEC_ID_NONE;
151     int64_t tag  = -1;
152 
153     /**
154      * Check that tag + id is in the table
155      * If neither is in the table -> OK
156      * If tag is in the table with another id -> FAIL
157      * If id is in the table with another tag -> FAIL unless strict < normal
158      */
159     for (int n = 0; s->oformat->codec_tag[n]; n++) {
160         avctag = s->oformat->codec_tag[n];
161         while (avctag->id != AV_CODEC_ID_NONE) {
162             if (ff_toupper4(avctag->tag) == ff_toupper4(st->codecpar->codec_tag)) {
163                 id = avctag->id;
164                 if (id == st->codecpar->codec_id)
165                     return 1;
166             }
167             if (avctag->id == st->codecpar->codec_id)
168                 tag = avctag->tag;
169             avctag++;
170         }
171     }
172     if (id != AV_CODEC_ID_NONE)
173         return 0;
174     if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
175         return 0;
176     return 1;
177 }
178 
179 
180 static int init_muxer(AVFormatContext *s, AVDictionary **options)
181 {
182     FFFormatContext *const si = ffformatcontext(s);
183     AVDictionary *tmp = NULL;
184     const AVOutputFormat *of = s->oformat;
185     AVDictionaryEntry *e;
186     int ret = 0;
187 
188     if (options)
189         av_dict_copy(&tmp, *options, 0);
190 
191     if ((ret = av_opt_set_dict(s, &tmp)) < 0)
192         goto fail;
193     if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
194         (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
195         goto fail;
196 
197     if (!s->url && !(s->url = av_strdup(""))) {
198         ret = AVERROR(ENOMEM);
199         goto fail;
200     }
201 
202     // some sanity checks
203     if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
204         av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
205         ret = AVERROR(EINVAL);
206         goto fail;
207     }
208 
209     for (unsigned i = 0; i < s->nb_streams; i++) {
210         AVStream          *const  st = s->streams[i];
211         FFStream          *const sti = ffstream(st);
212         AVCodecParameters *const par = st->codecpar;
213         const AVCodecDescriptor *desc;
214 
215         if (!st->time_base.num) {
216             /* fall back on the default timebase values */
217 #ifdef OHOS_AUXILIARY_TRACK
218             if ((par->codec_type == AVMEDIA_TYPE_AUDIO || (par->codec_type == AVMEDIA_TYPE_AUXILIARY &&
219                 (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_MP3))) && par->sample_rate)
220 #else
221             if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
222 #endif
223                 avpriv_set_pts_info(st, 64, 1, par->sample_rate);
224             else
225                 avpriv_set_pts_info(st, 33, 1, 90000);
226         }
227 
228         switch (par->codec_type) {
229         case AVMEDIA_TYPE_AUDIO:
230             if (par->sample_rate <= 0) {
231                 av_log(s, AV_LOG_ERROR, "sample rate not set\n");
232                 ret = AVERROR(EINVAL);
233                 goto fail;
234             }
235 
236 #if FF_API_OLD_CHANNEL_LAYOUT
237 FF_DISABLE_DEPRECATION_WARNINGS
238             /* if the caller is using the deprecated channel layout API,
239              * convert it to the new style */
240             if (!par->ch_layout.nb_channels &&
241                 par->channels) {
242                 if (par->channel_layout) {
243                     av_channel_layout_from_mask(&par->ch_layout, par->channel_layout);
244                 } else {
245                     par->ch_layout.order       = AV_CHANNEL_ORDER_UNSPEC;
246                     par->ch_layout.nb_channels = par->channels;
247                 }
248             }
249 FF_ENABLE_DEPRECATION_WARNINGS
250 #endif
251 
252             if (!par->block_align)
253                 par->block_align = par->ch_layout.nb_channels *
254                                    av_get_bits_per_sample(par->codec_id) >> 3;
255             break;
256         case AVMEDIA_TYPE_VIDEO:
257             if ((par->width <= 0 || par->height <= 0) &&
258                 !(of->flags & AVFMT_NODIMENSIONS)) {
259                 av_log(s, AV_LOG_ERROR, "dimensions not set\n");
260                 ret = AVERROR(EINVAL);
261                 goto fail;
262             }
263             if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
264                 && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
265             ) {
266                 if (st->sample_aspect_ratio.num != 0 &&
267                     st->sample_aspect_ratio.den != 0 &&
268                     par->sample_aspect_ratio.num != 0 &&
269                     par->sample_aspect_ratio.den != 0) {
270                     av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
271                            "(%d/%d) and encoder layer (%d/%d)\n",
272                            st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
273                            par->sample_aspect_ratio.num,
274                            par->sample_aspect_ratio.den);
275                     ret = AVERROR(EINVAL);
276                     goto fail;
277                 }
278             }
279             break;
280 #ifdef OHOS_AUXILIARY_TRACK
281         case AVMEDIA_TYPE_AUXILIARY:
282             if (par->codec_id == AV_CODEC_ID_H264 || par->codec_id == AV_CODEC_ID_H265) {
283                 if ((par->width <= 0 || par->height <= 0) &&
284                     !(of->flags & AVFMT_NODIMENSIONS)) {
285                     av_log(s, AV_LOG_ERROR, "dimensions not set\n");
286                     ret = AVERROR(EINVAL);
287                     goto fail;
288                 }
289                 if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
290                     && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) >
291                     0.004*av_q2d(st->sample_aspect_ratio)    // 0.004 value same as video
292                 ) {
293                     if (st->sample_aspect_ratio.num != 0 &&
294                         st->sample_aspect_ratio.den != 0 &&
295                         par->sample_aspect_ratio.num != 0 &&
296                         par->sample_aspect_ratio.den != 0) {
297                         av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
298                             "(%d/%d) and encoder layer (%d/%d)\n",
299                             st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
300                             par->sample_aspect_ratio.num,
301                             par->sample_aspect_ratio.den);
302                         ret = AVERROR(EINVAL);
303                         goto fail;
304                     }
305                 }
306             } else if (par->codec_id == AV_CODEC_ID_AAC || par->codec_id == AV_CODEC_ID_MP3) {
307                 if (par->sample_rate <= 0) {
308                     av_log(s, AV_LOG_ERROR, "sample rate not set\n");
309                     ret = AVERROR(EINVAL);
310                     goto fail;
311                 }
312 
313 #if FF_API_OLD_CHANNEL_LAYOUT
314 FF_DISABLE_DEPRECATION_WARNINGS
315                 /* if the caller is using the deprecated channel layout API,
316                 * convert it to the new style */
317                 if (!par->ch_layout.nb_channels &&
318                     par->channels) {
319                     if (par->channel_layout) {
320                         av_channel_layout_from_mask(&par->ch_layout, par->channel_layout);
321                     } else {
322                         par->ch_layout.order       = AV_CHANNEL_ORDER_UNSPEC;
323                         par->ch_layout.nb_channels = par->channels;
324                     }
325                 }
326 FF_ENABLE_DEPRECATION_WARNINGS
327 #endif
328 
329                 if (!par->block_align)
330                     par->block_align = par->ch_layout.nb_channels *
331                                     av_get_bits_per_sample(par->codec_id) >> 3; // 3 means divide by 8
332             }
333             break;
334 #endif
335         }
336 
337         desc = avcodec_descriptor_get(par->codec_id);
338         if (desc && desc->props & AV_CODEC_PROP_REORDER)
339             sti->reorder = 1;
340 
341         sti->is_intra_only = ff_is_intra_only(par->codec_id);
342 
343         if (of->codec_tag) {
344             if (   par->codec_tag
345                 && par->codec_id == AV_CODEC_ID_RAWVIDEO
346                 && (   av_codec_get_tag(of->codec_tag, par->codec_id) == 0
347                     || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
348                 && !validate_codec_tag(s, st)) {
349                 // the current rawvideo encoding system ends up setting
350                 // the wrong codec_tag for avi/mov, we override it here
351                 par->codec_tag = 0;
352             }
353             if (par->codec_tag) {
354                 if (!validate_codec_tag(s, st)) {
355                     const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
356                     av_log(s, AV_LOG_ERROR,
357                            "Tag %s incompatible with output codec id '%d' (%s)\n",
358                            av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
359                     ret = AVERROR_INVALIDDATA;
360                     goto fail;
361                 }
362             } else
363                 par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
364         }
365 
366         if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
367             si->nb_interleaved_streams++;
368     }
369     si->interleave_packet = of->interleave_packet;
370     if (!si->interleave_packet)
371         si->interleave_packet = si->nb_interleaved_streams > 1 ?
372                                     ff_interleave_packet_per_dts :
373                                     ff_interleave_packet_passthrough;
374 
375     if (!s->priv_data && of->priv_data_size > 0) {
376         s->priv_data = av_mallocz(of->priv_data_size);
377         if (!s->priv_data) {
378             ret = AVERROR(ENOMEM);
379             goto fail;
380         }
381         if (of->priv_class) {
382             *(const AVClass **)s->priv_data = of->priv_class;
383             av_opt_set_defaults(s->priv_data);
384             if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
385                 goto fail;
386         }
387     }
388 
389     /* set muxer identification string */
390     if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
391         av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
392     } else {
393         av_dict_set(&s->metadata, "encoder", NULL, 0);
394     }
395 
396     for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
397         av_dict_set(&s->metadata, e->key, NULL, 0);
398     }
399 
400     if (options) {
401          av_dict_free(options);
402          *options = tmp;
403     }
404 
405     if (s->oformat->init) {
406         if ((ret = s->oformat->init(s)) < 0) {
407             if (s->oformat->deinit)
408                 s->oformat->deinit(s);
409             return ret;
410         }
411         return ret == 0;
412     }
413 
414     return 0;
415 
416 fail:
417     av_dict_free(&tmp);
418     return ret;
419 }
420 
421 static int init_pts(AVFormatContext *s)
422 {
423     FFFormatContext *const si = ffformatcontext(s);
424 
425     /* init PTS generation */
426     for (unsigned i = 0; i < s->nb_streams; i++) {
427         AVStream *const st = s->streams[i];
428         FFStream *const sti = ffstream(st);
429         int64_t den = AV_NOPTS_VALUE;
430 
431         switch (st->codecpar->codec_type) {
432         case AVMEDIA_TYPE_AUDIO:
433             den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
434             break;
435         case AVMEDIA_TYPE_VIDEO:
436             den = (int64_t)st->time_base.num * st->time_base.den;
437             break;
438 #ifdef OHOS_TIMED_META_TRACK
439         case AVMEDIA_TYPE_TIMEDMETA:
440             den = (int64_t)st->time_base.num * st->time_base.den;
441             break;
442 #endif
443 #ifdef OHOS_AUXILIARY_TRACK
444         case AVMEDIA_TYPE_AUXILIARY:
445             if (st->codecpar->codec_id == AV_CODEC_ID_AAC || st->codecpar->codec_id == AV_CODEC_ID_MP3) {
446                 den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
447             } else if (st->codecpar->codec_id == AV_CODEC_ID_H264 || st->codecpar->codec_id == AV_CODEC_ID_H265) {
448                 den = (int64_t)st->time_base.num * st->time_base.den;
449             }
450             break;
451 #endif
452         default:
453             break;
454         }
455 
456         if (!sti->priv_pts)
457             sti->priv_pts = av_mallocz(sizeof(*sti->priv_pts));
458         if (!sti->priv_pts)
459             return AVERROR(ENOMEM);
460 
461         if (den != AV_NOPTS_VALUE) {
462             if (den <= 0)
463                 return AVERROR_INVALIDDATA;
464 
465             frac_init(sti->priv_pts, 0, 0, den);
466         }
467     }
468 
469     si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_UNKNOWN;
470     if (s->avoid_negative_ts < 0) {
471         av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
472         if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
473             s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_DISABLED;
474             si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_DISABLED;
475         } else
476             s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
477     } else if (s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_DISABLED)
478         si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_DISABLED;
479 
480     return 0;
481 }
482 
483 static void flush_if_needed(AVFormatContext *s)
484 {
485     if (s->pb && s->pb->error >= 0) {
486         if (s->flush_packets == 1 || s->flags & AVFMT_FLAG_FLUSH_PACKETS)
487             avio_flush(s->pb);
488         else if (s->flush_packets && !(s->oformat->flags & AVFMT_NOFILE))
489             avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT);
490     }
491 }
492 
493 static void deinit_muxer(AVFormatContext *s)
494 {
495     FFFormatContext *const si = ffformatcontext(s);
496     if (s->oformat && s->oformat->deinit && si->initialized)
497         s->oformat->deinit(s);
498     si->initialized =
499     si->streams_initialized = 0;
500 }
501 
502 int avformat_init_output(AVFormatContext *s, AVDictionary **options)
503 {
504     FFFormatContext *const si = ffformatcontext(s);
505     int ret = 0;
506 
507     if ((ret = init_muxer(s, options)) < 0)
508         return ret;
509 
510     si->initialized = 1;
511     si->streams_initialized = ret;
512 
513     if (s->oformat->init && ret) {
514         if ((ret = init_pts(s)) < 0)
515             return ret;
516 
517         return AVSTREAM_INIT_IN_INIT_OUTPUT;
518     }
519 
520     return AVSTREAM_INIT_IN_WRITE_HEADER;
521 }
522 
523 int avformat_write_header(AVFormatContext *s, AVDictionary **options)
524 {
525     FFFormatContext *const si = ffformatcontext(s);
526     int already_initialized = si->initialized;
527     int streams_already_initialized = si->streams_initialized;
528     int ret = 0;
529 
530     if (!already_initialized)
531         if ((ret = avformat_init_output(s, options)) < 0)
532             return ret;
533 
534     if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
535         avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
536     if (s->oformat->write_header) {
537         ret = s->oformat->write_header(s);
538         if (ret >= 0 && s->pb && s->pb->error < 0)
539             ret = s->pb->error;
540         if (ret < 0)
541             goto fail;
542         flush_if_needed(s);
543     }
544     if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
545         avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);
546 
547     if (!si->streams_initialized) {
548         if ((ret = init_pts(s)) < 0)
549             goto fail;
550     }
551 
552     return streams_already_initialized;
553 
554 fail:
555     deinit_muxer(s);
556     return ret;
557 }
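
/*
 * Illustrative note (not part of the original source): per the public API
 * documentation, the return value of avformat_write_header() tells the
 * caller whether the muxer filled in the stream parameters here or already
 * in avformat_init_output(), e.g.:
 *
 * @code
 * int ret = avformat_write_header(oc, &opts);   // oc, opts set up by caller
 * if (ret == AVSTREAM_INIT_IN_WRITE_HEADER) {
 *     // stream parameters were initialized in write_header
 * } else if (ret == AVSTREAM_INIT_IN_INIT_OUTPUT) {
 *     // stream parameters were already initialized in avformat_init_output()
 * } else if (ret < 0) {
 *     // error
 * }
 * @endcode
 */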
558 
559 #define AV_PKT_FLAG_UNCODED_FRAME 0x2000
560 
561 
562 #if FF_API_COMPUTE_PKT_FIELDS2
563 FF_DISABLE_DEPRECATION_WARNINGS
564 //FIXME merge with compute_pkt_fields
565 static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
566 {
567     FFFormatContext *const si = ffformatcontext(s);
568     FFStream *const sti = ffstream(st);
569     int delay = st->codecpar->video_delay;
570     int frame_size;
571 
572     if (!si->missing_ts_warning &&
573         !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
574         (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
575         (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
576         av_log(s, AV_LOG_WARNING,
577                "Timestamps are unset in a packet for stream %d. "
578                "This is deprecated and will stop working in the future. "
579                "Fix your code to set the timestamps properly\n", st->index);
580         si->missing_ts_warning = 1;
581     }
582 
583     if (s->debug & FF_FDEBUG_TS)
584         av_log(s, AV_LOG_DEBUG, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
585             av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(sti->cur_dts), delay, pkt->size, pkt->stream_index);
586 
587     if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
588         pkt->pts = pkt->dts;
589 
590     //XXX/FIXME this is a temporary hack until all encoders output pts
591     if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
592         static int warned;
593         if (!warned) {
594             av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
595             warned = 1;
596         }
597         pkt->dts =
598 //        pkt->pts= st->cur_dts;
599             pkt->pts = sti->priv_pts->val;
600     }
601 
602     //calculate dts from pts
603     if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
604         sti->pts_buffer[0] = pkt->pts;
605         for (int i = 1; i < delay + 1 && sti->pts_buffer[i] == AV_NOPTS_VALUE; i++)
606             sti->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
607         for (int i = 0; i<delay && sti->pts_buffer[i] > sti->pts_buffer[i + 1]; i++)
608             FFSWAP(int64_t, sti->pts_buffer[i], sti->pts_buffer[i + 1]);
609 
610         pkt->dts = sti->pts_buffer[0];
611     }
612 
613     if (sti->cur_dts && sti->cur_dts != AV_NOPTS_VALUE &&
614         ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
615           st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
616           st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
617           sti->cur_dts >= pkt->dts) || sti->cur_dts > pkt->dts)) {
618         av_log(s, AV_LOG_ERROR,
619                "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
620                st->index, av_ts2str(sti->cur_dts), av_ts2str(pkt->dts));
621         return AVERROR(EINVAL);
622     }
623     if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
624         av_log(s, AV_LOG_ERROR,
625                "pts (%s) < dts (%s) in stream %d\n",
626                av_ts2str(pkt->pts), av_ts2str(pkt->dts),
627                st->index);
628         return AVERROR(EINVAL);
629     }
630 
631     if (s->debug & FF_FDEBUG_TS)
632         av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n",
633             av_ts2str(pkt->pts), av_ts2str(pkt->dts));
634 
635     sti->cur_dts      = pkt->dts;
636     sti->priv_pts->val = pkt->dts;
637 
638     /* update pts */
639     switch (st->codecpar->codec_type) {
640     case AVMEDIA_TYPE_AUDIO:
641         frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
642                      (*(AVFrame **)pkt->data)->nb_samples :
643                      av_get_audio_frame_duration2(st->codecpar, pkt->size);
644 
645         /* HACK/FIXME, we skip the initial 0 size packets as they are most
646          * likely equal to the encoder delay, but it would be better if we
647          * had the real timestamps from the encoder */
648         if (frame_size >= 0 && (pkt->size || sti->priv_pts->num != sti->priv_pts->den >> 1 || sti->priv_pts->val)) {
649             frac_add(sti->priv_pts, (int64_t)st->time_base.den * frame_size);
650         }
651         break;
652     case AVMEDIA_TYPE_VIDEO:
653         frac_add(sti->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
654         break;
655     }
656     return 0;
657 }
658 FF_ENABLE_DEPRECATION_WARNINGS
659 #endif
660 
661 static void guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
662 {
663     if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
664         av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
665                pkt->duration, pkt->stream_index);
666         pkt->duration = 0;
667     }
668 
669     if (pkt->duration)
670         return;
671 
672     switch (st->codecpar->codec_type) {
673     case AVMEDIA_TYPE_VIDEO:
674         if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
675             pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate),
676                                          st->time_base);
677         } else if (st->time_base.num * 1000LL > st->time_base.den)
678             pkt->duration = 1;
679         break;
680     case AVMEDIA_TYPE_AUDIO: {
681         int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
682         if (frame_size && st->codecpar->sample_rate) {
683             pkt->duration = av_rescale_q(frame_size,
684                                          (AVRational){1, st->codecpar->sample_rate},
685                                          st->time_base);
686         }
687         break;
688         }
689 #ifdef OHOS_AUXILIARY_TRACK
690     case AVMEDIA_TYPE_AUXILIARY:
691         if (st->codecpar->codec_id == AV_CODEC_ID_AAC || st->codecpar->codec_id == AV_CODEC_ID_MP3) {
692             int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
693             if (frame_size && st->codecpar->sample_rate) {
694                 pkt->duration = av_rescale_q(frame_size,
695                                             (AVRational){1, st->codecpar->sample_rate},
696                                             st->time_base);
697             }
698         } else if (st->codecpar->codec_id == AV_CODEC_ID_H264 || st->codecpar->codec_id == AV_CODEC_ID_H265) {
699             if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
700                 pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate), st->time_base);
701             } else if (st->time_base.num * 1000LL > st->time_base.den) { // 1000
702                 pkt->duration = 1;
703             }
704         }
705         break;
706 #endif
707     }
708 }
709 
710 static void handle_avoid_negative_ts(FFFormatContext *si, FFStream *sti,
711                                      AVPacket *pkt)
712 {
713     AVFormatContext *const s = &si->pub;
714     int64_t offset;
715 
716     if (!AVOID_NEGATIVE_TS_ENABLED(si->avoid_negative_ts_status))
717         return;
718 
719     if (si->avoid_negative_ts_status == AVOID_NEGATIVE_TS_UNKNOWN) {
720         int use_pts = si->avoid_negative_ts_use_pts;
721         int64_t ts = use_pts ? pkt->pts : pkt->dts;
722         AVRational tb = sti->pub.time_base;
723 
724         if (ts == AV_NOPTS_VALUE)
725             return;
726 
727         /* Peek into the muxing queue to improve our estimate
728          * of the lowest timestamp if av_interleaved_write_frame() is used. */
729         for (const PacketListEntry *pktl = si->packet_buffer.head;
730              pktl; pktl = pktl->next) {
731             AVRational cmp_tb = s->streams[pktl->pkt.stream_index]->time_base;
732             int64_t cmp_ts = use_pts ? pktl->pkt.pts : pktl->pkt.dts;
733             if (cmp_ts == AV_NOPTS_VALUE)
734                 continue;
735             if (s->output_ts_offset)
736                 cmp_ts += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, cmp_tb);
737             if (av_compare_ts(cmp_ts, cmp_tb, ts, tb) < 0) {
738                 ts = cmp_ts;
739                 tb = cmp_tb;
740             }
741         }
742 
743         if (ts < 0 ||
744             ts > 0 && s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
745             for (unsigned i = 0; i < s->nb_streams; i++) {
746                 AVStream *const st2  = s->streams[i];
747                 FFStream *const sti2 = ffstream(st2);
748                 sti2->mux_ts_offset = av_rescale_q_rnd(-ts, tb,
749                                                        st2->time_base,
750                                                        AV_ROUND_UP);
751             }
752         }
753         si->avoid_negative_ts_status = AVOID_NEGATIVE_TS_KNOWN;
754     }
755 
756     offset = sti->mux_ts_offset;
757 
758     if (pkt->dts != AV_NOPTS_VALUE)
759         pkt->dts += offset;
760     if (pkt->pts != AV_NOPTS_VALUE)
761         pkt->pts += offset;
762 
763     if (si->avoid_negative_ts_use_pts) {
764         if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) {
765             av_log(s, AV_LOG_WARNING, "failed to avoid negative "
766                    "pts %s in stream %d.\n"
767                    "Try -avoid_negative_ts 1 as a possible workaround.\n",
768                    av_ts2str(pkt->pts),
769                    pkt->stream_index
770             );
771         }
772     } else {
773         if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
774             av_log(s, AV_LOG_WARNING,
775                    "Packets poorly interleaved, failed to avoid negative "
776                    "timestamp %s in stream %d.\n"
777                    "Try -max_interleave_delta 0 as a possible workaround.\n",
778                    av_ts2str(pkt->dts),
779                    pkt->stream_index
780             );
781         }
782     }
783 }
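
/*
 * Worked example (illustrative, not from the original source): with
 * avoid_negative_ts set to AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE and a first
 * timestamp of -200 in a 1/1000 time base, the loop above stores
 *
 *   mux_ts_offset = av_rescale_q_rnd(200, (AVRational){1, 1000},
 *                                    st2->time_base, AV_ROUND_UP)
 *
 * for every stream, so a stream with a 1/90000 time base gets an offset of
 * 18000 and all of its subsequent pts/dts values are shifted by that amount.
 */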
784 
785 /**
786  * Shift timestamps and call muxer; the original pts/dts are not kept.
787  *
788  * FIXME: this function should NEVER get undefined pts/dts beside when the
789  * AVFMT_NOTIMESTAMPS is set.
790  * Those additional safety checks should be dropped once the correct checks
791  * are set in the callers.
792  */
793 static int write_packet(AVFormatContext *s, AVPacket *pkt)
794 {
795     FFFormatContext *const si = ffformatcontext(s);
796     AVStream *const st = s->streams[pkt->stream_index];
797     FFStream *const sti = ffstream(st);
798     int ret;
799 
800     // If the timestamp offsetting below is adjusted, adjust
801     // ff_interleaved_peek similarly.
802     if (s->output_ts_offset) {
803         int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
804 
805         if (pkt->dts != AV_NOPTS_VALUE)
806             pkt->dts += offset;
807         if (pkt->pts != AV_NOPTS_VALUE)
808             pkt->pts += offset;
809     }
810     handle_avoid_negative_ts(si, sti, pkt);
811 
812     if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
813         AVFrame **frame = (AVFrame **)pkt->data;
814         av_assert0(pkt->size == sizeof(*frame));
815         ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, frame, 0);
816     } else {
817         ret = s->oformat->write_packet(s, pkt);
818     }
819 
820     if (s->pb && ret >= 0) {
821         flush_if_needed(s);
822         if (s->pb->error < 0)
823             ret = s->pb->error;
824     }
825 
826     if (ret >= 0)
827         st->nb_frames++;
828 
829     return ret;
830 }
831 
832 static int check_packet(AVFormatContext *s, AVPacket *pkt)
833 {
834     if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
835         av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
836                pkt->stream_index);
837         return AVERROR(EINVAL);
838     }
839 
840     if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
841         av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
842         return AVERROR(EINVAL);
843     }
844 
845     return 0;
846 }
847 
848 static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
849 {
850     FFStream *const sti = ffstream(st);
851 #if !FF_API_COMPUTE_PKT_FIELDS2
852     /* sanitize the timestamps */
853     if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
854 
855         /* when there is no reordering (so dts is equal to pts), but
856          * only one of them is set, set the other as well */
857         if (!sti->reorder) {
858             if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
859                 pkt->pts = pkt->dts;
860             if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
861                 pkt->dts = pkt->pts;
862         }
863 
864         /* check that the timestamps are set */
865         if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
866             av_log(s, AV_LOG_ERROR,
867                    "Timestamps are unset in a packet for stream %d\n", st->index);
868             return AVERROR(EINVAL);
869         }
870 
871         /* check that the dts are increasing (or at least non-decreasing,
872          * if the format allows it) */
873         if (sti->cur_dts != AV_NOPTS_VALUE &&
874             ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && sti->cur_dts >= pkt->dts) ||
875              sti->cur_dts > pkt->dts)) {
876             av_log(s, AV_LOG_ERROR,
877                    "Application provided invalid, non monotonically increasing "
878                    "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
879                    st->index, sti->cur_dts, pkt->dts);
880             return AVERROR(EINVAL);
881         }
882 
883         if (pkt->pts < pkt->dts) {
884             av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
885                    pkt->pts, pkt->dts, st->index);
886             return AVERROR(EINVAL);
887         }
888     }
889 #endif
890     /* update flags */
891     if (sti->is_intra_only)
892         pkt->flags |= AV_PKT_FLAG_KEY;
893 
894     if (!pkt->data && !pkt->side_data_elems) {
895         /* Such empty packets signal EOS for the BSF API; so sanitize
896          * the packet by allocating data of size 0 (+ padding). */
897         av_buffer_unref(&pkt->buf);
898         return av_packet_make_refcounted(pkt);
899     }
900 
901     return 0;
902 }
903 
904 #define CHUNK_START 0x1000
905 
906 int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
907                              int (*compare)(AVFormatContext *, const AVPacket *, const AVPacket *))
908 {
909     int ret;
910     FFFormatContext *const si = ffformatcontext(s);
911     PacketListEntry **next_point, *this_pktl;
912     AVStream *st = s->streams[pkt->stream_index];
913     FFStream *const sti = ffstream(st);
914     int chunked  = s->max_chunk_size || s->max_chunk_duration;
915 
916     this_pktl    = av_malloc(sizeof(*this_pktl));
917     if (!this_pktl) {
918         av_packet_unref(pkt);
919         return AVERROR(ENOMEM);
920     }
921     if ((ret = av_packet_make_refcounted(pkt)) < 0) {
922         av_free(this_pktl);
923         av_packet_unref(pkt);
924         return ret;
925     }
926 
927     av_packet_move_ref(&this_pktl->pkt, pkt);
928     pkt = &this_pktl->pkt;
929 
930     if (sti->last_in_packet_buffer) {
931         next_point = &(sti->last_in_packet_buffer->next);
932     } else {
933         next_point = &si->packet_buffer.head;
934     }
935 
936     if (chunked) {
937         uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
938         sti->interleaver_chunk_size     += pkt->size;
939         sti->interleaver_chunk_duration += pkt->duration;
940         if (   (s->max_chunk_size && sti->interleaver_chunk_size > s->max_chunk_size)
941             || (max && sti->interleaver_chunk_duration           > max)) {
942             sti->interleaver_chunk_size = 0;
943             pkt->flags |= CHUNK_START;
944             if (max && sti->interleaver_chunk_duration > max) {
945                 int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
946                 int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;
947 
948                 sti->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
949             } else
950                 sti->interleaver_chunk_duration  = 0;
951         }
952     }
953     if (*next_point) {
954         if (chunked && !(pkt->flags & CHUNK_START))
955             goto next_non_null;
956 
957         if (compare(s, &si->packet_buffer.tail->pkt, pkt)) {
958             while (   *next_point
959                    && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
960                        || !compare(s, &(*next_point)->pkt, pkt)))
961                 next_point = &(*next_point)->next;
962             if (*next_point)
963                 goto next_non_null;
964         } else {
965             next_point = &(si->packet_buffer.tail->next);
966         }
967     }
968     av_assert1(!*next_point);
969 
970     si->packet_buffer.tail = this_pktl;
971 next_non_null:
972 
973     this_pktl->next = *next_point;
974 
975     sti->last_in_packet_buffer = *next_point = this_pktl;
976 
977     return 0;
978 }
979 
980 static int interleave_compare_dts(AVFormatContext *s, const AVPacket *next,
981                                                       const AVPacket *pkt)
982 {
983     AVStream *st  = s->streams[pkt->stream_index];
984     AVStream *st2 = s->streams[next->stream_index];
985     int comp      = av_compare_ts(next->dts, st2->time_base, pkt->dts,
986                                   st->time_base);
987     if (s->audio_preload) {
988         int preload  = st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
989         int preload2 = st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
990         if (preload != preload2) {
991             int64_t ts, ts2;
992             preload  *= s->audio_preload;
993             preload2 *= s->audio_preload;
994             ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - preload;
995             ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - preload2;
996             if (ts == ts2) {
997                 ts  = ((uint64_t)pkt ->dts*st ->time_base.num*AV_TIME_BASE - (uint64_t)preload *st ->time_base.den)*st2->time_base.den
998                     - ((uint64_t)next->dts*st2->time_base.num*AV_TIME_BASE - (uint64_t)preload2*st2->time_base.den)*st ->time_base.den;
999                 ts2 = 0;
1000             }
1001             comp = (ts2 > ts) - (ts2 < ts);
1002         }
1003     }
1004 
1005     if (comp == 0)
1006         return pkt->stream_index < next->stream_index;
1007     return comp > 0;
1008 }
1009 
1010 int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *pkt,
1011                                  int flush, int has_packet)
1012 {
1013     FFFormatContext *const si = ffformatcontext(s);
1014     int stream_count = 0;
1015     int noninterleaved_count = 0;
1016     int ret;
1017     int eof = flush;
1018 
1019     if (has_packet) {
1020         if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
1021             return ret;
1022     }
1023 
1024     for (unsigned i = 0; i < s->nb_streams; i++) {
1025         const AVStream *const st  = s->streams[i];
1026         const FFStream *const sti = cffstream(st);
1027         const AVCodecParameters *const par = st->codecpar;
1028         if (sti->last_in_packet_buffer) {
1029             ++stream_count;
1030         } else if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
1031                    par->codec_id != AV_CODEC_ID_VP8 &&
1032                    par->codec_id != AV_CODEC_ID_VP9) {
1033             ++noninterleaved_count;
1034         }
1035     }
1036 
1037     if (si->nb_interleaved_streams == stream_count)
1038         flush = 1;
1039 
1040     if (s->max_interleave_delta > 0 &&
1041         si->packet_buffer.head &&
1042         !flush &&
1043         si->nb_interleaved_streams == stream_count+noninterleaved_count
1044     ) {
1045         AVPacket *const top_pkt = &si->packet_buffer.head->pkt;
1046         int64_t delta_dts = INT64_MIN;
1047         int64_t top_dts = av_rescale_q(top_pkt->dts,
1048                                        s->streams[top_pkt->stream_index]->time_base,
1049                                        AV_TIME_BASE_Q);
1050 
1051         for (unsigned i = 0; i < s->nb_streams; i++) {
1052             const AVStream *const st  = s->streams[i];
1053             const FFStream *const sti = cffstream(st);
1054             const PacketListEntry *const last = sti->last_in_packet_buffer;
1055             int64_t last_dts;
1056 
1057             if (!last)
1058                 continue;
1059 
1060             last_dts = av_rescale_q(last->pkt.dts,
1061                                     st->time_base,
1062                                     AV_TIME_BASE_Q);
1063             delta_dts = FFMAX(delta_dts, last_dts - top_dts);
1064         }
1065 
1066         if (delta_dts > s->max_interleave_delta) {
1067             av_log(s, AV_LOG_DEBUG,
1068                    "Delay between the first packet and last packet in the "
1069                    "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
1070                    delta_dts, s->max_interleave_delta);
1071             flush = 1;
1072         }
1073     }
1074 
1075     if (si->packet_buffer.head &&
1076         eof &&
1077         (s->flags & AVFMT_FLAG_SHORTEST) &&
1078         si->shortest_end == AV_NOPTS_VALUE) {
1079         AVPacket *const top_pkt = &si->packet_buffer.head->pkt;
1080 
1081         si->shortest_end = av_rescale_q(top_pkt->dts,
1082                                        s->streams[top_pkt->stream_index]->time_base,
1083                                        AV_TIME_BASE_Q);
1084     }
1085 
1086     if (si->shortest_end != AV_NOPTS_VALUE) {
1087         while (si->packet_buffer.head) {
1088             PacketListEntry *pktl = si->packet_buffer.head;
1089             AVPacket *const top_pkt = &pktl->pkt;
1090             AVStream *const st = s->streams[top_pkt->stream_index];
1091             FFStream *const sti = ffstream(st);
1092             int64_t top_dts = av_rescale_q(top_pkt->dts, st->time_base,
1093                                         AV_TIME_BASE_Q);
1094 
1095             if (si->shortest_end + 1 >= top_dts)
1096                 break;
1097 
1098             si->packet_buffer.head = pktl->next;
1099             if (!si->packet_buffer.head)
1100                 si->packet_buffer.tail = NULL;
1101 
1102             if (sti->last_in_packet_buffer == pktl)
1103                 sti->last_in_packet_buffer = NULL;
1104 
1105             av_packet_unref(&pktl->pkt);
1106             av_freep(&pktl);
1107             flush = 0;
1108         }
1109     }
1110 
1111     if (stream_count && flush) {
1112         PacketListEntry *pktl = si->packet_buffer.head;
1113         AVStream *const st = s->streams[pktl->pkt.stream_index];
1114         FFStream *const sti = ffstream(st);
1115 
1116         if (sti->last_in_packet_buffer == pktl)
1117             sti->last_in_packet_buffer = NULL;
1118         avpriv_packet_list_get(&si->packet_buffer, pkt);
1119 
1120         return 1;
1121     } else {
1122         return 0;
1123     }
1124 }
1125 
1126 int ff_interleave_packet_passthrough(AVFormatContext *s, AVPacket *pkt,
1127                                      int flush, int has_packet)
1128 {
1129     return has_packet;
1130 }
1131 
1132 int ff_get_muxer_ts_offset(AVFormatContext *s, int stream_index, int64_t *offset)
1133 {
1134     AVStream *st;
1135 
1136     if (stream_index < 0 || stream_index >= s->nb_streams)
1137         return AVERROR(EINVAL);
1138 
1139     st = s->streams[stream_index];
1140     *offset = ffstream(st)->mux_ts_offset;
1141 
1142     if (s->output_ts_offset)
1143         *offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
1144 
1145     return 0;
1146 }
1147 
1148 const AVPacket *ff_interleaved_peek(AVFormatContext *s, int stream)
1149 {
1150     FFFormatContext *const si = ffformatcontext(s);
1151     PacketListEntry *pktl = si->packet_buffer.head;
1152     while (pktl) {
1153         if (pktl->pkt.stream_index == stream) {
1154             return &pktl->pkt;
1155         }
1156         pktl = pktl->next;
1157     }
1158     return NULL;
1159 }
1160 
1161 static int check_bitstream(AVFormatContext *s, FFStream *sti, AVPacket *pkt)
1162 {
1163     int ret;
1164 
1165     if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
1166         return 1;
1167 
1168     if (s->oformat->check_bitstream) {
1169         if (!sti->bitstream_checked) {
1170             if ((ret = s->oformat->check_bitstream(s, &sti->pub, pkt)) < 0)
1171                 return ret;
1172             else if (ret == 1)
1173                 sti->bitstream_checked = 1;
1174         }
1175     }
1176 
1177     return 1;
1178 }
1179 
1180 static int interleaved_write_packet(AVFormatContext *s, AVPacket *pkt,
1181                                     int flush, int has_packet)
1182 {
1183     FFFormatContext *const si = ffformatcontext(s);
1184     for (;; ) {
1185         int ret = si->interleave_packet(s, pkt, flush, has_packet);
1186         if (ret <= 0)
1187             return ret;
1188 
1189         has_packet = 0;
1190 
1191         ret = write_packet(s, pkt);
1192         av_packet_unref(pkt);
1193         if (ret < 0)
1194             return ret;
1195     }
1196 }
1197 
1198 static int write_packet_common(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
1199 {
1200     int ret;
1201 
1202     if (s->debug & FF_FDEBUG_TS)
1203         av_log(s, AV_LOG_DEBUG, "%s size:%d dts:%s pts:%s\n", __FUNCTION__,
1204                pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
1205 
1206     guess_pkt_duration(s, st, pkt);
1207 
1208 #if FF_API_COMPUTE_PKT_FIELDS2
1209     if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
1210         return ret;
1211 #endif
1212 
1213     if (interleaved) {
1214         if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
1215             return AVERROR(EINVAL);
1216         return interleaved_write_packet(s, pkt, 0, 1);
1217     } else {
1218         return write_packet(s, pkt);
1219     }
1220 }
1221 
1222 static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
1223 {
1224     FFStream *const sti = ffstream(st);
1225     AVBSFContext *const bsfc = sti->bsfc;
1226     int ret;
1227 
1228     if ((ret = av_bsf_send_packet(bsfc, pkt)) < 0) {
1229         av_log(s, AV_LOG_ERROR,
1230                 "Failed to send packet to filter %s for stream %d\n",
1231                 bsfc->filter->name, st->index);
1232         return ret;
1233     }
1234 
1235     do {
1236         ret = av_bsf_receive_packet(bsfc, pkt);
1237         if (ret < 0) {
1238             if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
1239                 return 0;
1240             av_log(s, AV_LOG_ERROR, "Error applying bitstream filters to an output "
1241                    "packet for stream #%d: %s\n", st->index, av_err2str(ret));
1242             if (!(s->error_recognition & AV_EF_EXPLODE) && ret != AVERROR(ENOMEM))
1243                 continue;
1244             return ret;
1245         }
1246         av_packet_rescale_ts(pkt, bsfc->time_base_out, st->time_base);
1247         ret = write_packet_common(s, st, pkt, interleaved);
1248         if (ret >= 0 && !interleaved) // a successful write_packet_common already unrefed pkt for interleaved
1249             av_packet_unref(pkt);
1250     } while (ret >= 0);
1251 
1252     return ret;
1253 }
1254 
1255 static int write_packets_common(AVFormatContext *s, AVPacket *pkt, int interleaved)
1256 {
1257     AVStream *st;
1258     FFStream *sti;
1259     int ret = check_packet(s, pkt);
1260     if (ret < 0)
1261         return ret;
1262     st = s->streams[pkt->stream_index];
1263     sti = ffstream(st);
1264 
1265     ret = prepare_input_packet(s, st, pkt);
1266     if (ret < 0)
1267         return ret;
1268 
1269     ret = check_bitstream(s, sti, pkt);
1270     if (ret < 0)
1271         return ret;
1272 
1273     if (sti->bsfc) {
1274         return write_packets_from_bsfs(s, st, pkt, interleaved);
1275     } else {
1276         return write_packet_common(s, st, pkt, interleaved);
1277     }
1278 }
1279 
1280 int av_write_frame(AVFormatContext *s, AVPacket *in)
1281 {
1282     FFFormatContext *const si = ffformatcontext(s);
1283     AVPacket *pkt = si->parse_pkt;
1284     int ret;
1285 
1286     if (!in) {
1287         if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
1288             ret = s->oformat->write_packet(s, NULL);
1289             flush_if_needed(s);
1290             if (ret >= 0 && s->pb && s->pb->error < 0)
1291                 ret = s->pb->error;
1292             return ret;
1293         }
1294         return 1;
1295     }
1296 
1297     if (in->flags & AV_PKT_FLAG_UNCODED_FRAME) {
1298         pkt = in;
1299     } else {
1300         /* We don't own in, so we have to make sure not to modify it.
1301          * (ff_write_chained() relies on this fact.)
1302          * The following avoids copying in's data unnecessarily.
1303          * Copying side data is unavoidable as a bitstream filter
1304          * may change it, e.g. free it on errors. */
1305         pkt->data = in->data;
1306         pkt->size = in->size;
1307         ret = av_packet_copy_props(pkt, in);
1308         if (ret < 0)
1309             return ret;
1310         if (in->buf) {
1311             pkt->buf = av_buffer_ref(in->buf);
1312             if (!pkt->buf) {
1313                 ret = AVERROR(ENOMEM);
1314                 goto fail;
1315             }
1316         }
1317     }
1318 
1319     ret = write_packets_common(s, pkt, 0/*non-interleaved*/);
1320 
1321 fail:
1322     // Uncoded frames using the noninterleaved codepath are also freed here
1323     av_packet_unref(pkt);
1324     return ret;
1325 }
1326 
1327 int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
1328 {
1329     int ret;
1330 
1331     if (pkt) {
1332         ret = write_packets_common(s, pkt, 1/*interleaved*/);
1333         if (ret < 0)
1334             av_packet_unref(pkt);
1335         return ret;
1336     } else {
1337         av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
1338         return interleaved_write_packet(s, ffformatcontext(s)->parse_pkt, 1/*flush*/, 0);
1339     }
1340 }
1341 
1342 int av_write_trailer(AVFormatContext *s)
1343 {
1344     FFFormatContext *const si = ffformatcontext(s);
1345     AVPacket *const pkt = si->parse_pkt;
1346     int ret1, ret = 0;
1347 
1348     for (unsigned i = 0; i < s->nb_streams; i++) {
1349         AVStream *const st  = s->streams[i];
1350         FFStream *const sti = ffstream(st);
1351         if (sti->bsfc) {
1352             ret1 = write_packets_from_bsfs(s, st, pkt, 1/*interleaved*/);
1353             if (ret1 < 0)
1354                 av_packet_unref(pkt);
1355             if (ret >= 0)
1356                 ret = ret1;
1357         }
1358     }
1359     ret1 = interleaved_write_packet(s, pkt, 1, 0);
1360     if (ret >= 0)
1361         ret = ret1;
1362 
1363     if (s->oformat->write_trailer) {
1364         if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
1365             avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
1366         if (ret >= 0) {
1367             ret = s->oformat->write_trailer(s);
1368         } else {
1369             s->oformat->write_trailer(s);
1370         }
1371     }
1372 
1373     deinit_muxer(s);
1374 
1375     if (s->pb)
1376        avio_flush(s->pb);
1377     if (ret == 0)
1378        ret = s->pb ? s->pb->error : 0;
1379     for (unsigned i = 0; i < s->nb_streams; i++) {
1380         av_freep(&s->streams[i]->priv_data);
1381         av_freep(&ffstream(s->streams[i])->index_entries);
1382     }
1383     if (s->oformat->priv_class)
1384         av_opt_free(s->priv_data);
1385     av_freep(&s->priv_data);
1386     av_packet_unref(si->pkt);
1387     return ret;
1388 }
1389 
1390 int av_get_output_timestamp(struct AVFormatContext *s, int stream,
1391                             int64_t *dts, int64_t *wall)
1392 {
1393     if (!s->oformat || !s->oformat->get_output_timestamp)
1394         return AVERROR(ENOSYS);
1395     s->oformat->get_output_timestamp(s, stream, dts, wall);
1396     return 0;
1397 }
1398 
1399 int ff_stream_add_bitstream_filter(AVStream *st, const char *name, const char *args)
1400 {
1401     int ret;
1402     const AVBitStreamFilter *bsf;
1403     FFStream *const sti = ffstream(st);
1404     AVBSFContext *bsfc;
1405 
1406     av_assert0(!sti->bsfc);
1407 
1408     if (!(bsf = av_bsf_get_by_name(name))) {
1409         av_log(NULL, AV_LOG_ERROR, "Unknown bitstream filter '%s'\n", name);
1410         return AVERROR_BSF_NOT_FOUND;
1411     }
1412 
1413     if ((ret = av_bsf_alloc(bsf, &bsfc)) < 0)
1414         return ret;
1415 
1416     bsfc->time_base_in = st->time_base;
1417     if ((ret = avcodec_parameters_copy(bsfc->par_in, st->codecpar)) < 0) {
1418         av_bsf_free(&bsfc);
1419         return ret;
1420     }
1421 
1422     if (args && bsfc->filter->priv_class) {
1423         if ((ret = av_set_options_string(bsfc->priv_data, args, "=", ":")) < 0) {
1424             av_bsf_free(&bsfc);
1425             return ret;
1426         }
1427     }
1428 
1429     if ((ret = av_bsf_init(bsfc)) < 0) {
1430         av_bsf_free(&bsfc);
1431         return ret;
1432     }
1433 
1434     sti->bsfc = bsfc;
1435 
1436     av_log(NULL, AV_LOG_VERBOSE,
1437            "Automatically inserted bitstream filter '%s'; args='%s'\n",
1438            name, args ? args : "");
1439     return 1;
1440 }
1441 
1442 int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
1443                      AVFormatContext *src, int interleave)
1444 {
1445     int64_t pts = pkt->pts, dts = pkt->dts, duration = pkt->duration;
1446     int stream_index = pkt->stream_index;
1447     AVRational time_base = pkt->time_base;
1448     int ret;
1449 
1450     pkt->stream_index = dst_stream;
1451 
1452     av_packet_rescale_ts(pkt,
1453                          src->streams[stream_index]->time_base,
1454                          dst->streams[dst_stream]->time_base);
1455 
1456     if (!interleave) {
1457         ret = av_write_frame(dst, pkt);
1458         /* We only have to backup and restore the fields that
1459          * we changed ourselves, because av_write_frame() does not
1460          * modify the packet given to it. */
1461         pkt->pts          = pts;
1462         pkt->dts          = dts;
1463         pkt->duration     = duration;
1464         pkt->stream_index = stream_index;
1465         pkt->time_base    = time_base;
1466     } else
1467         ret = av_interleaved_write_frame(dst, pkt);
1468 
1469     return ret;
1470 }
1471 
1472 static void uncoded_frame_free(void *unused, uint8_t *data)
1473 {
1474     av_frame_free((AVFrame **)data);
1475     av_free(data);
1476 }
1477 
1478 static int write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
1479                                         AVFrame *frame, int interleaved)
1480 {
1481     FFFormatContext *const si = ffformatcontext(s);
1482     AVPacket *pkt = si->parse_pkt;
1483 
1484     av_assert0(s->oformat);
1485     if (!s->oformat->write_uncoded_frame) {
1486         av_frame_free(&frame);
1487         return AVERROR(ENOSYS);
1488     }
1489 
1490     if (!frame) {
1491         pkt = NULL;
1492     } else {
1493         size_t   bufsize = sizeof(frame) + AV_INPUT_BUFFER_PADDING_SIZE;
1494         AVFrame **framep = av_mallocz(bufsize);
1495 
1496         if (!framep)
1497             goto fail;
1498         pkt->buf = av_buffer_create((void *)framep, bufsize,
1499                                    uncoded_frame_free, NULL, 0);
1500         if (!pkt->buf) {
1501             av_free(framep);
1502     fail:
1503             av_frame_free(&frame);
1504             return AVERROR(ENOMEM);
1505         }
1506         *framep = frame;
1507 
1508         pkt->data         = (void *)framep;
1509         pkt->size         = sizeof(frame);
1510         pkt->pts          =
1511         pkt->dts          = frame->pts;
1512         pkt->duration     = frame->pkt_duration;
1513         pkt->stream_index = stream_index;
1514         pkt->flags |= AV_PKT_FLAG_UNCODED_FRAME;
1515     }
1516 
1517     return interleaved ? av_interleaved_write_frame(s, pkt) :
1518                          av_write_frame(s, pkt);
1519 }
1520 
1521 int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
1522                            AVFrame *frame)
1523 {
1524     return write_uncoded_frame_internal(s, stream_index, frame, 0);
1525 }
1526 
1527 int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
1528                                        AVFrame *frame)
1529 {
1530     return write_uncoded_frame_internal(s, stream_index, frame, 1);
1531 }
1532 
1533 int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
1534 {
1535     av_assert0(s->oformat);
1536     if (!s->oformat->write_uncoded_frame)
1537         return AVERROR(ENOSYS);
1538     return s->oformat->write_uncoded_frame(s, stream_index, NULL,
1539                                            AV_WRITE_UNCODED_FRAME_QUERY);
1540 }
1541