/*
 * OpenH264 video encoder
 * Copyright (C) 2014 Martin Storsjo
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <wels/codec_api.h>
#include <wels/codec_ver.h>

#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"

#include "avcodec.h"
#include "codec_internal.h"
#include "encode.h"
#include "internal.h"
#include "libopenh264.h"

#if !OPENH264_VER_AT_LEAST(1, 6)
#define SM_SIZELIMITED_SLICE SM_DYN_SLICE
#endif

#define TARGET_BITRATE_DEFAULT 2*1000*1000

typedef struct SVCContext {
    const AVClass *av_class;
    ISVCEncoder *encoder;
    int slice_mode;
    int loopfilter;
    int profile;
    int max_nal_size;
    int skip_frames;
    int skipped;
#if FF_API_OPENH264_CABAC
    int cabac;                      // deprecated
#endif
    int coder;

    // rate control mode
    int rc_mode;
} SVCContext;

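/* Encoder private options, exposed through the AVOption system. */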
#define OFFSET(x) offsetof(SVCContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
#define DEPRECATED AV_OPT_FLAG_DEPRECATED
static const AVOption options[] = {
#if FF_API_OPENH264_SLICE_MODE
#if OPENH264_VER_AT_LEAST(1, 6)
    { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_FIXEDSLCNUM_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
#else
    { "slice_mode", "set slice mode, use slices/max_nal_size", OFFSET(slice_mode), AV_OPT_TYPE_INT, { .i64 = SM_AUTO_SLICE }, SM_SINGLE_SLICE, SM_RESERVED, VE|DEPRECATED, "slice_mode" },
#endif
        { "fixed", "a fixed number of slices", 0, AV_OPT_TYPE_CONST, { .i64 = SM_FIXEDSLCNUM_SLICE }, 0, 0, VE, "slice_mode" },
#if OPENH264_VER_AT_LEAST(1, 6)
        { "dyn", "Size limited (compatibility name)", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
        { "sizelimited", "Size limited", 0, AV_OPT_TYPE_CONST, { .i64 = SM_SIZELIMITED_SLICE }, 0, 0, VE, "slice_mode" },
#else
        { "rowmb", "one slice per row of macroblocks", 0, AV_OPT_TYPE_CONST, { .i64 = SM_ROWMB_SLICE }, 0, 0, VE, "slice_mode" },
        { "auto", "automatic number of slices according to number of threads", 0, AV_OPT_TYPE_CONST, { .i64 = SM_AUTO_SLICE }, 0, 0, VE, "slice_mode" },
        { "dyn", "Dynamic slicing", 0, AV_OPT_TYPE_CONST, { .i64 = SM_DYN_SLICE }, 0, 0, VE, "slice_mode" },
#endif
#endif
    { "loopfilter", "enable loop filter", OFFSET(loopfilter), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, VE },
    { "profile", "set profile restrictions", OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = FF_PROFILE_UNKNOWN }, FF_PROFILE_UNKNOWN, 0xffff, VE, "profile" },
#define PROFILE(name, value)  name, NULL, 0, AV_OPT_TYPE_CONST, { .i64 = value }, 0, 0, VE, "profile"
        { PROFILE("constrained_baseline", FF_PROFILE_H264_CONSTRAINED_BASELINE) },
        { PROFILE("main",                 FF_PROFILE_H264_MAIN) },
        { PROFILE("high",                 FF_PROFILE_H264_HIGH) },
#undef PROFILE
    { "max_nal_size", "set maximum NAL size in bytes", OFFSET(max_nal_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { "allow_skip_frames", "allow skipping frames to hit the target bitrate", OFFSET(skip_frames), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
#if FF_API_OPENH264_CABAC
    { "cabac", "Enable cabac(deprecated, use coder)", OFFSET(cabac), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VE|DEPRECATED },
#endif
    { "coder", "Coder type",  OFFSET(coder), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, "coder" },
        { "default",          NULL, 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, VE, "coder" },
        { "cavlc",            NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 },  INT_MIN, INT_MAX, VE, "coder" },
        { "cabac",            NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 },  INT_MIN, INT_MAX, VE, "coder" },
        { "vlc",              NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 0 },  INT_MIN, INT_MAX, VE, "coder" },
        { "ac",               NULL, 0, AV_OPT_TYPE_CONST, { .i64 = 1 },  INT_MIN, INT_MAX, VE, "coder" },

    { "rc_mode", "Select rate control mode", OFFSET(rc_mode), AV_OPT_TYPE_INT, { .i64 = RC_QUALITY_MODE }, RC_OFF_MODE, RC_TIMESTAMP_MODE, VE, "rc_mode" },
        { "off",       "bit rate control off",                                                 0, AV_OPT_TYPE_CONST, { .i64 = RC_OFF_MODE },         0, 0, VE, "rc_mode" },
        { "quality",   "quality mode",                                                         0, AV_OPT_TYPE_CONST, { .i64 = RC_QUALITY_MODE },     0, 0, VE, "rc_mode" },
        { "bitrate",   "bitrate mode",                                                         0, AV_OPT_TYPE_CONST, { .i64 = RC_BITRATE_MODE },     0, 0, VE, "rc_mode" },
        { "buffer",    "using buffer status to adjust the video quality (no bitrate control)", 0, AV_OPT_TYPE_CONST, { .i64 = RC_BUFFERBASED_MODE }, 0, 0, VE, "rc_mode" },
#if OPENH264_VER_AT_LEAST(1, 4)
        { "timestamp", "bit rate control based on timestamp",                                  0, AV_OPT_TYPE_CONST, { .i64 = RC_TIMESTAMP_MODE },   0, 0, VE, "rc_mode" },
#endif

    { NULL }
};

static const AVClass class = {
    .class_name = "libopenh264enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

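/* Tear down the libopenh264 encoder instance and report how many frames the
 * encoder skipped. */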
static av_cold int svc_encode_close(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;

    if (s->encoder)
        WelsDestroySVCEncoder(s->encoder);
    if (s->skipped > 0)
        av_log(avctx, AV_LOG_WARNING, "%d frames skipped\n", s->skipped);
    return 0;
}

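/* Create a libopenh264 encoder instance and configure it from the
 * AVCodecContext settings and the private options. */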
static av_cold int svc_encode_init(AVCodecContext *avctx)
{
    SVCContext *s = avctx->priv_data;
    SEncParamExt param = { 0 };
    int err;
    int log_level;
    WelsTraceCallback callback_function;
    AVCPBProperties *props;

    if ((err = ff_libopenh264_check_version(avctx)) < 0)
        return AVERROR_ENCODER_NOT_FOUND;

    if (WelsCreateSVCEncoder(&s->encoder)) {
        av_log(avctx, AV_LOG_ERROR, "Unable to create encoder\n");
        return AVERROR_UNKNOWN;
    }

    // Pass all libopenh264 messages to our callback, to allow ourselves to filter them.
    log_level = WELS_LOG_DETAIL;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_LEVEL, &log_level);

    // Set the logging callback function to one that uses av_log() (see implementation above).
    callback_function = (WelsTraceCallback) ff_libopenh264_trace_callback;
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK, &callback_function);

    // Set the AVCodecContext as the libopenh264 callback context so that it can be passed to av_log().
    (*s->encoder)->SetOption(s->encoder, ENCODER_OPTION_TRACE_CALLBACK_CONTEXT, &avctx);

    (*s->encoder)->GetDefaultParams(s->encoder, &param);

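    /* Prefer the explicitly signalled frame rate; otherwise derive the maximum
     * frame rate from the time base and ticks_per_frame. */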
    if (avctx->framerate.num > 0 && avctx->framerate.den > 0) {
        param.fMaxFrameRate = av_q2d(avctx->framerate);
    } else {
        if (avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
            av_log(avctx, AV_LOG_ERROR,
                   "Could not set framerate for libopenh264enc: integer overflow\n");
            return AVERROR(EINVAL);
        }
        param.fMaxFrameRate = 1.0 / av_q2d(avctx->time_base) / FFMAX(avctx->ticks_per_frame, 1);
    }
    param.iPicWidth                  = avctx->width;
    param.iPicHeight                 = avctx->height;
    param.iTargetBitrate             = avctx->bit_rate > 0 ? avctx->bit_rate : TARGET_BITRATE_DEFAULT;
    param.iMaxBitrate                = FFMAX(avctx->rc_max_rate, avctx->bit_rate);
    param.iRCMode                    = s->rc_mode;
    if (avctx->qmax >= 0)
        param.iMaxQp                 = av_clip(avctx->qmax, 1, 51);
    if (avctx->qmin >= 0)
        param.iMinQp                 = av_clip(avctx->qmin, 1, param.iMaxQp);
    param.iTemporalLayerNum          = 1;
    param.iSpatialLayerNum           = 1;
    param.bEnableDenoise             = 0;
    param.bEnableBackgroundDetection = 1;
    param.bEnableAdaptiveQuant       = 1;
    param.bEnableFrameSkip           = s->skip_frames;
    param.bEnableLongTermReference   = 0;
    param.iLtrMarkPeriod             = 30;
    if (avctx->gop_size >= 0)
        param.uiIntraPeriod          = avctx->gop_size;
#if OPENH264_VER_AT_LEAST(1, 4)
    param.eSpsPpsIdStrategy          = CONSTANT_ID;
#else
    param.bEnableSpsPpsIdAddition    = 0;
#endif
    param.bPrefixNalAddingCtrl       = 0;
    param.iLoopFilterDisableIdc      = !s->loopfilter;
    param.iEntropyCodingModeFlag     = s->coder >= 0 ? s->coder : 1;
    param.iMultipleThreadIdc         = avctx->thread_count;

    /* Allow specifying the libopenh264 profile through AVCodecContext. */
    if (FF_PROFILE_UNKNOWN == s->profile &&
        FF_PROFILE_UNKNOWN != avctx->profile)
        switch (avctx->profile) {
        case FF_PROFILE_H264_HIGH:
        case FF_PROFILE_H264_MAIN:
        case FF_PROFILE_H264_CONSTRAINED_BASELINE:
            s->profile = avctx->profile;
            break;
        default:
            av_log(avctx, AV_LOG_WARNING,
                   "Unsupported avctx->profile: %d.\n", avctx->profile);
            break;
        }

    if (s->profile == FF_PROFILE_UNKNOWN && s->coder >= 0)
        s->profile = s->coder == 0 ? FF_PROFILE_H264_CONSTRAINED_BASELINE :
#if OPENH264_VER_AT_LEAST(1, 8)
                                     FF_PROFILE_H264_HIGH;
#else
                                     FF_PROFILE_H264_MAIN;
#endif

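    /* Log the selected profile; constrained baseline does not support CABAC,
     * so entropy coding is forced back to CAVLC for it and for unknown or
     * unsupported profiles. */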
    switch (s->profile) {
    case FF_PROFILE_H264_HIGH:
        av_log(avctx, AV_LOG_VERBOSE, "Using %s, "
                "select EProfileIdc PRO_HIGH in libopenh264.\n",
                param.iEntropyCodingModeFlag ? "CABAC" : "CAVLC");
        break;
    case FF_PROFILE_H264_MAIN:
        av_log(avctx, AV_LOG_VERBOSE, "Using %s, "
                "select EProfileIdc PRO_MAIN in libopenh264.\n",
                param.iEntropyCodingModeFlag ? "CABAC" : "CAVLC");
        break;
    case FF_PROFILE_H264_CONSTRAINED_BASELINE:
    case FF_PROFILE_UNKNOWN:
        s->profile = FF_PROFILE_H264_CONSTRAINED_BASELINE;
        param.iEntropyCodingModeFlag = 0;
        av_log(avctx, AV_LOG_VERBOSE, "Using CAVLC, "
               "select EProfileIdc PRO_BASELINE in libopenh264.\n");
        break;
    default:
        s->profile = FF_PROFILE_H264_CONSTRAINED_BASELINE;
        param.iEntropyCodingModeFlag = 0;
        av_log(avctx, AV_LOG_WARNING, "Unsupported profile, "
               "select EProfileIdc PRO_BASELINE in libopenh264.\n");
        break;
    }

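    /* Only a single spatial layer is used; mirror the global dimensions,
     * frame rate and bitrates into it. */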
    param.sSpatialLayers[0].iVideoWidth         = param.iPicWidth;
    param.sSpatialLayers[0].iVideoHeight        = param.iPicHeight;
    param.sSpatialLayers[0].fFrameRate          = param.fMaxFrameRate;
    param.sSpatialLayers[0].iSpatialBitrate     = param.iTargetBitrate;
    param.sSpatialLayers[0].iMaxSpatialBitrate  = param.iMaxBitrate;
    param.sSpatialLayers[0].uiProfileIdc        = s->profile;

#if OPENH264_VER_AT_LEAST(1, 7)
    if (avctx->sample_aspect_ratio.num && avctx->sample_aspect_ratio.den) {
        // Table E-1.
        static const AVRational sar_idc[] = {
            {   0,  0 }, // Unspecified (never written here).
            {   1,  1 }, {  12, 11 }, {  10, 11 }, {  16, 11 },
            {  40, 33 }, {  24, 11 }, {  20, 11 }, {  32, 11 },
            {  80, 33 }, {  18, 11 }, {  15, 11 }, {  64, 33 },
            { 160, 99 }, // Last 3 are unknown to openh264: {   4,  3 }, {   3,  2 }, {   2,  1 },
        };
        static const ESampleAspectRatio asp_idc[] = {
            ASP_UNSPECIFIED,
            ASP_1x1,      ASP_12x11,   ASP_10x11,   ASP_16x11,
            ASP_40x33,    ASP_24x11,   ASP_20x11,   ASP_32x11,
            ASP_80x33,    ASP_18x11,   ASP_15x11,   ASP_64x33,
            ASP_160x99,
        };
        int num, den, i;

        av_reduce(&num, &den, avctx->sample_aspect_ratio.num,
                  avctx->sample_aspect_ratio.den, 65535);

        for (i = 1; i < FF_ARRAY_ELEMS(sar_idc); i++) {
            if (num == sar_idc[i].num &&
                den == sar_idc[i].den)
                break;
        }
        if (i == FF_ARRAY_ELEMS(sar_idc)) {
            param.sSpatialLayers[0].eAspectRatio = ASP_EXT_SAR;
            param.sSpatialLayers[0].sAspectRatioExtWidth = num;
            param.sSpatialLayers[0].sAspectRatioExtHeight = den;
        } else {
            param.sSpatialLayers[0].eAspectRatio = asp_idc[i];
        }
        param.sSpatialLayers[0].bAspectRatioPresent = true;
    } else {
        param.sSpatialLayers[0].bAspectRatioPresent = false;
    }
#endif

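    /* -slices selects a fixed slice count, -max_nal_size selects size-limited
     * slices; the two options are mutually exclusive. */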
    if ((avctx->slices > 1) && (s->max_nal_size)) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid combination -slices %d and -max_nal_size %d.\n",
               avctx->slices, s->max_nal_size);
        return AVERROR(EINVAL);
    }

    if (avctx->slices > 1)
        s->slice_mode = SM_FIXEDSLCNUM_SLICE;

    if (s->max_nal_size)
        s->slice_mode = SM_SIZELIMITED_SLICE;

#if OPENH264_VER_AT_LEAST(1, 6)
    param.sSpatialLayers[0].sSliceArgument.uiSliceMode = s->slice_mode;
    param.sSpatialLayers[0].sSliceArgument.uiSliceNum  = avctx->slices;
#else
    param.sSpatialLayers[0].sSliceCfg.uiSliceMode               = s->slice_mode;
    param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceNum = avctx->slices;
#endif
    if (avctx->slices == 0 && s->slice_mode == SM_FIXEDSLCNUM_SLICE)
        av_log(avctx, AV_LOG_WARNING, "Slice count will be set automatically\n");

    if (s->slice_mode == SM_SIZELIMITED_SLICE) {
        if (s->max_nal_size) {
            param.uiMaxNalSize = s->max_nal_size;
#if OPENH264_VER_AT_LEAST(1, 6)
            param.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#else
            param.sSpatialLayers[0].sSliceCfg.sSliceArgument.uiSliceSizeConstraint = s->max_nal_size;
#endif
        } else {
            av_log(avctx, AV_LOG_ERROR, "Invalid -max_nal_size, "
                   "specify a valid max_nal_size to use -slice_mode dyn\n");
            return AVERROR(EINVAL);
        }
    }

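    /* Propagate colour range and colour description (matrix, primaries,
     * transfer) into the VUI when the caller has set them. */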
#if OPENH264_VER_AT_LEAST(1, 6)
    param.sSpatialLayers[0].uiVideoFormat = VF_UNDEF;
    if (avctx->color_range != AVCOL_RANGE_UNSPECIFIED) {
        param.sSpatialLayers[0].bVideoSignalTypePresent = true;
        param.sSpatialLayers[0].bFullRange = (avctx->color_range == AVCOL_RANGE_JPEG);
    }

    if (avctx->colorspace != AVCOL_SPC_UNSPECIFIED      ||
        avctx->color_primaries != AVCOL_PRI_UNSPECIFIED ||
        avctx->color_trc != AVCOL_TRC_UNSPECIFIED) {
        param.sSpatialLayers[0].bVideoSignalTypePresent = true;
        param.sSpatialLayers[0].bColorDescriptionPresent = true;
    }

    if (avctx->colorspace != AVCOL_SPC_UNSPECIFIED)
        param.sSpatialLayers[0].uiColorMatrix = avctx->colorspace;
    if (avctx->color_primaries != AVCOL_PRI_UNSPECIFIED)
        param.sSpatialLayers[0].uiColorPrimaries = avctx->color_primaries;
    if (avctx->color_trc != AVCOL_TRC_UNSPECIFIED)
        param.sSpatialLayers[0].uiTransferCharacteristics = avctx->color_trc;
#endif

    if ((*s->encoder)->InitializeExt(s->encoder, &param) != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "Initialize failed\n");
        return AVERROR_UNKNOWN;
    }

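    /* With global headers, store the SPS/PPS in extradata instead of
     * repeating them in-band. */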
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        SFrameBSInfo fbi = { 0 };
        int i, size = 0;
        (*s->encoder)->EncodeParameterSets(s->encoder, &fbi);
        for (i = 0; i < fbi.sLayerInfo[0].iNalCount; i++)
            size += fbi.sLayerInfo[0].pNalLengthInByte[i];
        avctx->extradata = av_mallocz(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata)
            return AVERROR(ENOMEM);
        avctx->extradata_size = size;
        memcpy(avctx->extradata, fbi.sLayerInfo[0].pBsBuf, size);
    }

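    /* Publish the configured target and maximum bitrate as CPB properties
     * side data. */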
    props = ff_add_cpb_side_data(avctx);
    if (!props)
        return AVERROR(ENOMEM);
    props->max_bitrate = param.iMaxBitrate;
    props->avg_bitrate = param.iTargetBitrate;

    return 0;
}

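/* Encode a single frame. If the rate controller skips the frame, no packet is
 * produced and *got_packet stays 0. */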
static int svc_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                            const AVFrame *frame, int *got_packet)
{
    SVCContext *s = avctx->priv_data;
    SFrameBSInfo fbi = { 0 };
    int i, ret;
    int encoded;
    SSourcePicture sp = { 0 };
    int size = 0, layer, first_layer = 0;
    int layer_size[MAX_LAYER_NUM_OF_FRAME] = { 0 };

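    /* Wrap the input AVFrame in an SSourcePicture; no pixel data is copied. */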
    sp.iColorFormat = videoFormatI420;
    for (i = 0; i < 3; i++) {
        sp.iStride[i] = frame->linesize[i];
        sp.pData[i]   = frame->data[i];
    }
    sp.iPicWidth  = avctx->width;
    sp.iPicHeight = avctx->height;

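    /* Honour a caller-requested keyframe. */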
    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        (*s->encoder)->ForceIntraFrame(s->encoder, true);
    }

    encoded = (*s->encoder)->EncodeFrame(s->encoder, &sp, &fbi);
    if (encoded != cmResultSuccess) {
        av_log(avctx, AV_LOG_ERROR, "EncodeFrame failed\n");
        return AVERROR_UNKNOWN;
    }
    if (fbi.eFrameType == videoFrameTypeSkip) {
        s->skipped++;
        av_log(avctx, AV_LOG_DEBUG, "frame skipped\n");
        return 0;
    }
    first_layer = 0;
    // Normal frames are returned with one single layer, while IDR
    // frames have two layers, where the first layer contains the SPS/PPS.
    // If using global headers, don't include the SPS/PPS in the returned
    // packet - thus, only return one layer.
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
        first_layer = fbi.iLayerNum - 1;

    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        for (i = 0; i < fbi.sLayerInfo[layer].iNalCount; i++)
            layer_size[layer] += fbi.sLayerInfo[layer].pNalLengthInByte[i];
        size += layer_size[layer];
    }
    av_log(avctx, AV_LOG_DEBUG, "%d slices\n", fbi.sLayerInfo[fbi.iLayerNum - 1].iNalCount);

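    /* Allocate the output packet and concatenate the bitstream of all
     * included layers into it. */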
    if ((ret = ff_get_encode_buffer(avctx, avpkt, size, 0)))
        return ret;

    size = 0;
    for (layer = first_layer; layer < fbi.iLayerNum; layer++) {
        memcpy(avpkt->data + size, fbi.sLayerInfo[layer].pBsBuf, layer_size[layer]);
        size += layer_size[layer];
    }
    avpkt->pts = frame->pts;
    if (fbi.eFrameType == videoFrameTypeIDR)
        avpkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    return 0;
}

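/* Defaults chosen so that svc_encode_init() can tell "unset" apart from an
 * explicit user value. */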
static const FFCodecDefault svc_enc_defaults[] = {
    { "b",         "0"     },
    { "g",         "-1"    },
    { "qmin",      "-1"    },
    { "qmax",      "-1"    },
    { NULL },
};

const FFCodec ff_libopenh264_encoder = {
    .p.name         = "libopenh264",
    .p.long_name    = NULL_IF_CONFIG_SMALL("OpenH264 H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_H264,
    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_OTHER_THREADS,
    .priv_data_size = sizeof(SVCContext),
    .init           = svc_encode_init,
    FF_CODEC_ENCODE_CB(svc_encode_frame),
    .close          = svc_encode_close,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
                      FF_CODEC_CAP_AUTO_THREADS,
    .p.pix_fmts     = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NONE },
    .defaults       = svc_enc_defaults,
    .p.priv_class   = &class,
    .p.wrapper_name = "libopenh264",
};